[
  {
    "path": ".gitignore",
    "content": "CMakeLists.txt.user*\n*.geany\n*.autosave\npackage-lock.json\nhtml-export/node_modules\nhtml-export/dist/SAMPLE_DATA.js\nhtml-export/.vscode\n.eslintrc.js\n"
  },
  {
    "path": "CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.6)\n\nif (NOT (CMAKE_VERSION VERSION_LESS \"3.20\"))\n    cmake_policy( SET CMP0115 NEW )\nendif()\n\n# version applies to all released files: shournal, shournal-run, libshournal-shellwatch.so\n# and shell-integration-scripts (e.g. integration_ko.bash)\nset(shournal_version \"3.3\")\n\ncmake_policy( SET CMP0048 NEW )\nproject(shournal VERSION ${shournal_version} LANGUAGES CXX C)\n\nlist(APPEND CMAKE_MODULE_PATH \"${CMAKE_CURRENT_SOURCE_DIR}/cmake\")\nfind_package(ShournalUtil REQUIRED)\n\nif (NOT DEFINED MSENTER_GROUPNAME)\n    set(MSENTER_GROUPNAME \"shournalmsenter\")\nendif()\nadd_definitions( -DSHOURNAL_MSENTERGROUP=\"${MSENTER_GROUPNAME}\")\n\n# No need to make configurable - user can override in /etc/shournal.d/kgroup\n# (or, not recommended, use a custom rule in /etc/udev/rules.d).\n# DO NOT CHANGE, it is documented in the README.\nset(GROUPNAME_SHOURNALK \"shournalk\")\n\n\n# Inside docker no kernel module may be installed,\n# but we default to using the host's kernel-module.\n# When only shournal-run-fanotify is desired, no need to\n# compile shournal-run.\n# -DSHOURNAL_EDITION={full, docker, ko, fanotify}\nif(NOT DEFINED SHOURNAL_EDITION)\n    set(SHOURNAL_EDITION \"full\")\nendif()\n\n\nif(NOT ${SHOURNAL_EDITION} MATCHES \"full|docker|ko|fanotify\")\n     message( FATAL_ERROR \"invalid SHOURNAL_EDITION passed: ${SHOURNAL_EDITION}\" )\nendif()\n\nset (CMAKE_CXX_STANDARD 11)\n\nif(CMAKE_COMPILER_IS_GNUCXX)\n    if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)\n        message(FATAL_ERROR \"GCC version must be at least 5.0!\")\n    endif()\nelse()\n    message(WARNING \"You are using an unsupported compiler. Compilation was only tested with GCC.\")\nendif()\n\nif ( CMAKE_COMPILER_IS_GNUCXX )\n    set(CMAKE_CXX_FLAGS  \"${CMAKE_CXX_FLAGS} -Wall -Wextra -Wunused-result -Werror=return-type\")\nendif()\n\n\nadd_definitions( -DSHOURNAL_VERSION=\"${shournal_version}\" )\n\n\nif (NOT EXISTS ${CMAKE_BINARY_DIR}/CMakeCache.txt)\n  if (NOT CMAKE_BUILD_TYPE)\n    set(CMAKE_BUILD_TYPE \"Release\" CACHE STRING \"\" FORCE)\n  endif()\nendif()\n\n\nIF(CMAKE_BUILD_TYPE MATCHES Release)\n    ADD_DEFINITIONS( -DQT_NO_DEBUG_OUTPUT=1)\n    SET(CMAKE_AR  \"gcc-ar\")\n    set(CMAKE_CXX_FLAGS_RELEASE \"${CMAKE_CXX_FLAGS_RELEASE} -s\")\nENDIF()\n\n# Profile purposes\nIF(CMAKE_BUILD_TYPE MATCHES RelWithDebInfo)\n    ADD_DEFINITIONS( -DQT_NO_DEBUG_OUTPUT=1)\n    set(CMAKE_CXX_FLAGS_RELWITHDEBINFO \"-O3 -g -DNDEBUG\")\nENDIF()\n\n# Meaningful stacktraces:\nSET (CMAKE_ENABLE_EXPORTS TRUE)\n\n\n# cmake policy: allow for hidden symbols in static libs\ncmake_policy( SET CMP0063 NEW )\nset(CMAKE_POLICY_DEFAULT_CMP0063 NEW)\n\nfunction(hide_static_lib_symbols staticLib)    \n    set_target_properties(${staticLib} PROPERTIES CXX_VISIBILITY_PRESET hidden)\n    set_target_properties(${staticLib} PROPERTIES CMAKE_VISIBILITY_INLINES_HIDDEN 1)\nendfunction(hide_static_lib_symbols)\n\n# append the content of f2 to f1\nfunction(append_to_file f1 f2)\n  file(READ ${f2} CONTENTS)\n  file(APPEND ${f1} \"${CONTENTS}\")\nendfunction()\n\n\n# Below code could be used to strip *all* symbols, however, we do it only\n# for the shared lib to allow for meaningful stacktraces in shournal and shournal-run\n# CMP0063 NEW allows hiding symbols also in static libraries\n# If cmake is too old, try to use compiler options directly or\n# print warning, if that also fails.\n# if (CMAKE_VERSION VERSION_LESS \"3.3\")\n#     IF (CMAKE_COMPILER_IS_GNUCXX OR\n#         \"${CMAKE_CXX_COMPILER_ID}\" STREQUAL \"Clang\")\n#         SET(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -fvisibility=hidden\")\n#         set(CMAKE_VISIBILITY_INLINES_HIDDEN 1)\n#     else()\n#         message(\"Warning - cannot hide all symbols of libshournal.so.\")\n#         message(\"Please upgrade cmake or use clang/gcc\")\n#     ENDIF()\n# else()\n#     cmake_policy( SET CMP0063 NEW )\n#     set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)\n#     set(CMAKE_CXX_VISIBILITY_PRESET hidden)\n#     set(CMAKE_VISIBILITY_INLINES_HIDDEN 1)\n# endif()\n\n\ninclude(GNUInstallDirs)\nset(shournal_install_dir_script ${CMAKE_INSTALL_FULL_DATAROOTDIR}/${PROJECT_NAME})\nset(shournal_install_dir_lib ${CMAKE_INSTALL_FULL_LIBDIR}/${PROJECT_NAME})\nset(shournal_install_dir_shournalk_src /usr/src/shournalk-${shournal_version})\n\nset(libshournal_fullname \"libshournal-shellwatch.so\")\nset(full_path_libshournal ${shournal_install_dir_lib}/${libshournal_fullname})\n\n\nset(CMAKE_INCLUDE_CURRENT_DIR ON)\nset(CMAKE_AUTOMOC ON)\n# qt resource files (.qrc):\nset(CMAKE_AUTORCC ON)\n\n\nfind_package(Qt5 COMPONENTS Core Sql Network REQUIRED)\n\ninclude_directories(\n    extern/tsl-ordered-map\n    extern/folly\n    extern/xxHash\n    )\n\n\nadd_subdirectory(\"extern/tsl-ordered-map\")\nset(XXHASH_BUNDLED_MODE ON)\nadd_subdirectory(extern/xxHash/cmake_unofficial EXCLUDE_FROM_ALL)\n\nadd_subdirectory(\"src\")\nadd_subdirectory(\"shell-integration-scripts\")\nadd_subdirectory(\"install\")\n\n# Kernel module\nif(${SHOURNAL_EDITION} MATCHES \"full|ko\")\n    add_subdirectory(\"kernel\")\nendif()\n\n# Turn on tests with 'cmake -Dtest=ON'.\n# To run the tests enter directory \"test\" within the build directory\n# and enter \"ctest\".\noption(test \"Build all tests.\" OFF)\n\nif (test)\n   add_subdirectory(\"test\")\nendif()\n\n# install license\ninstall(FILES\n    \"${CMAKE_CURRENT_SOURCE_DIR}/LICENSE\"\n    RENAME copyright # following Lintian\n    DESTINATION ${CMAKE_INSTALL_FULL_DOCDIR}\n    )\n\n############## Package creation using 'cpack' ##############\n\n# generic\nset(CPACK_GENERATOR \"DEB\")\nset(CPACK_PACKAGE_VERSION ${shournal_version})\nset(CPACK_PACKAGE_CONTACT \"Tycho Kirchner <tychokirchner@mail.de>\")\nSET(CPACK_RESOURCE_FILE_LICENSE \"${CMAKE_CURRENT_SOURCE_DIR}/LICENSE\")\n# If CPACK_INSTALL_PREFIX is not set, let it default to CMAKE_INSTALL_PREFIX\n# see also: https://stackoverflow.com/a/7363073/7015849\n# set(CPACK_SET_DESTDIR true)\n\nset(CPACK_PACKAGE_DESCRIPTION_SUMMARY\n   \"File-journal for your shell\"\n)\n\nset(SHOURNAL_CONFLICTS_LIST\n    \"shournal\" \"shournal-docker\"\n    \"shournal-ko\" \"shournal-fanotify\"\n)\n\nif(${SHOURNAL_EDITION} STREQUAL \"full\")\n    set(CPACK_PACKAGE_NAME \"shournal\")\n    set(edition_description \"full suite (all backends)\")\nelseif(${SHOURNAL_EDITION} STREQUAL \"ko\")\n    set(CPACK_PACKAGE_NAME \"shournal-ko\")\n    set(edition_description \"kernel backend only (fanotify backend not included)\")\nelseif(${SHOURNAL_EDITION} STREQUAL \"fanotify\")\n    set(CPACK_PACKAGE_NAME \"shournal-fanotify\")\n    set(edition_description \"fanotify backend only (no kernel module included)\")\nelseif(${SHOURNAL_EDITION} STREQUAL \"docker\")\n    set(CPACK_PACKAGE_NAME \"shournal-docker\")\n    set(edition_description \"docker-version to be installed inside containers\")\nelse()\n    message( FATAL_ERROR \"invalid cpack COMPONENT: ${SHOURNAL_EDITION}\" )\nendif()\n\nlist(REMOVE_ITEM SHOURNAL_CONFLICTS_LIST\n    \"${CPACK_PACKAGE_NAME}\"\n    )\nJOIN(\"${SHOURNAL_CONFLICTS_LIST}\" \", \" SHOURNAL_CONFLICTS)\n\n# CPACK_DEBIAN_PACKAGE_DESCRIPTION requires newlines\n# be indented by one space. For the sake of simplicity:\n# No new lines here:\nset(CPACK_PACKAGE_DESCRIPTION\n    \"Integrated tool to increase the reproducibility \\\nof your work on the shell: what did you do when and \\\nwhere and what files were modified/read. This package \\\nprovides the ${edition_description}.\")\n\n# deb specific\n# set(CPACK_GENERATOR \"DEB\")\n\nset(CPACK_DEBIAN_PACKAGE_DESCRIPTION \"${CPACK_PACKAGE_DESCRIPTION}\")\nexecute_process(COMMAND dpkg --print-architecture OUTPUT_VARIABLE DEB_ARCH\n    OUTPUT_STRIP_TRAILING_WHITESPACE)\nset(CPACK_DEBIAN_FILE_NAME\n    ${CPACK_PACKAGE_NAME}_${shournal_version}_${DEB_ARCH}.deb)\nset(CPACK_DEBIAN_PACKAGE_CONFLICTS \"${SHOURNAL_CONFLICTS}\")\nset(CPACK_DEBIAN_PACKAGE_HOMEPAGE \"https://github.com/tycho-kirchner/shournal\")\nset(CPACK_DEBIAN_PACKAGE_DEPENDS \"libc6 (>= 2.2), libstdc++6 (>= 5.0), libgcc1, \\\nlibqt5core5a (>= 5.6), libqt5network5, libqt5sql5-sqlite, libcap2, uuid-runtime\"\n)\nset(CPACK_DEBIAN_PACKAGE_SECTION \"utils\")\n\n# generate the postinst based on the groupname\nset(debPostinstPath \"${CMAKE_BINARY_DIR}/debian/postinst\")\nset(debPrermPath \"${CMAKE_BINARY_DIR}/debian/prerm\")\n\nset(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA\n    \"${debPostinstPath}\"\n    \"${debPrermPath}\"\n    )\nset(CPACK_DEBIAN_PACKAGE_CONTROL_STRICT_PERMISSION TRUE)\n\nif(${SHOURNAL_EDITION} MATCHES \"full|ko\")\n    set(CPACK_DEBIAN_PACKAGE_DEPENDS\n        \"${CPACK_DEBIAN_PACKAGE_DEPENDS}, dkms\")\n    append_to_file( \"${debPostinstPath}\" ${CMAKE_BINARY_DIR}/install/postinst-dkms )\n    append_to_file( \"${debPrermPath}\" ${CMAKE_BINARY_DIR}/install/prerm-dkms )\nendif()\n\n# call it *after* setting above variables, otherwise\n# generic .gz's are generated.\ninclude(CPack)\n\n\n\n"
  },
  {
    "path": "LICENSE",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.  We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors.  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights.  Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received.  You must make sure that they, too, receive\nor can get the source code.  And you must show them these terms so they\nknow their rights.\n\n  Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n  For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software.  For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n  Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so.  This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software.  The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable.  Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts.  If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n  Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary.  To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Use with the GNU Affero General Public License.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work.  
The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n    <program>  Copyright (C) <year>  <name of author>\n    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  
Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<http://www.gnu.org/licenses/>.\n\n  The GNU General Public License does not permit incorporating your program\ninto proprietary programs.  If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.  But first, please read\n<http://www.gnu.org/philosophy/why-not-lgpl.html>.\n"
  },
  {
    "path": "README-compile.md",
    "content": "\n# Compile and install from source\n\n* Install gcc >= 5.0. Other compilers might work but are untested.\n* Install cmake >= 3.6 and make\n* For safe generation of uuids it is recommended to install uuidd (uuid-runtime)\n* Install qt-dev, uuid-dev, qt-sqlite-driver, Qt version >= 5.6.\n  *With a little effort, shournal could be modified to\n  support Qt version >= 5.3. Please open an issue, if that would\n  be helpful to you.*\n* To build the kernel-module the headers are also required\n  (linux-headers-$(uname -r))\n\n  *Packages lists*:\n\n  Debian:\n  ~~~\n  apt-get install g++ cmake make qtbase5-dev libqt5sql5-sqlite \\\n   uuid-dev libcap-dev uuid-runtime linux-headers-$(dpkg --print-architecture) dkms\n  ~~~\n  Ubuntu:\n  ~~~\n  apt-get install g++ cmake make qtbase5-dev libqt5sql5-sqlite \\\n   uuid-dev libcap-dev uuid-runtime dkms \\\n   linux-headers-generic # or linux-headers-generic-hwe-$(lsb_release -rs) on HWE\n  ~~~\n  Opensuse:\n  ~~~\n  zypper install gcc-c++ cmake make libqt5-qtbase-devel \\\n   libQt5Sql5-sqlite libuuid-devel libcap-devel uuidd \\\n   kernel-default-devel dkms\n  ~~~\n  Arch Linux:\n  ~~~\n  yay -S gcc cmake make qt5-base uuid libcap linux-headers dkms\n  ~~~\n\n  CentOS (note: CentOS 7 as of July 2019 only ships with gcc 4.8\n  -> compile gcc >= 5.0 yourself. cmake3 and cmake are separate packages\n  where cmake in version 2 is the default. Please ensure to compile with\n  cmake3. The kernel 3.10 is too old for *shournal*'s kernel-module.\n  Either install a newer one or stick with the fanotify-edition):\n  ~~~\n  yum install gcc-c++ cmake3 make qt5-qtbase-devel libuuid-devel \\\n  libcap-devel uuidd kernel-devel dkms\n  ~~~\n\n* In the source-tree-directory, enter the following commands to\n  compile and install. 
By default `SHOURNAL_EDITION` `full` is built (see below).\n  Supported options include `full, docker, ko, fanotify`.\n  The `ko` (kernel module) edition does not install the fanotify backend\n  which may be desirable for security reasons as the setuid-binary\n  `shournal-run-fanotify` is omitted. For a description of the other editions\n  refer to [Binary releases](./README.md#binary-releases).\n  ~~~\n  mkdir -p build\n  cd build\n  # If you later want to generate a deb-package, it is recommended\n  # to use /usr as prefix: -DCMAKE_INSTALL_PREFIX=/usr\n  cmake -DSHOURNAL_EDITION=full ..\n  make\n  # as root:\n  make install\n  # or if using a Debian-based distribution, generate a .deb-package:\n  cpack -G DEB\n  ~~~\n* To also compile unit- and integration-tests, run cmake with\n  `-Dtest=ON` (debugging symbols via `-DCMAKE_BUILD_TYPE=Debug`). This generates\n  a `test/runTests` binary in the build dir. Without any arguments, it runs\n  unit-tests. Integration-tests are, for instance, executed using:\n  ~~~\n  # SHOURNAL_BACKEND=fanotify|ko\n  SHOURNAL_BACKEND=fanotify test/runTests --integration --shell 'bash -i'\n  ~~~\n\n\n**After compile and install**: <br>\nIf you created a .deb-package, please see\n[Binary releases](./README.md#binary-releases). **Otherwise:**\n\n**Kernel-module backend** <br>\nFor a quick test, the module can be loaded right from the build-tree:\n`$ insmod kernel/shournalk.ko`. <br>\nTo install the kernel-module (not built in SHOURNAL_EDITION's\n*docker* and *fanotify*) it is recommended to install it using dkms, e.g.:\n~~~\ndkms build shournalk/2.4    # adjust version as needed.\ndkms install shournalk/2.4\n# and load it with\nmodprobe shournalk\n~~~\nDepending on your distribution the dkms service may be disabled, thus\nafter a kernel-update shournal stops working. 
At least on\nOpensuse Tumbleweed it can be enabled with\n~~~\nsystemctl enable dkms\n~~~\n\n**fanotify backend** <br>\nAdd a group to your system, which is primarily needed for\nthe shell-integration:\n\n  ```groupadd shournalmsenter```\n\nHowever, *do not add any users to that group*. It is part of a permission\ncheck, where root adopts that gid (within shournal).\nIf you don't like the default group name, you can specify your own: at\nbuild time pass the following to cmake:\n\n  ```-DMSENTER_GROUPNAME=$your_group_name```\n\nFor **further post-install steps** please see\n[Binary releases](./README.md#binary-releases). Please note\nthat file-paths may need to be adjusted, e.g. the location of\nthe `SOURCE_ME.$shell_name` scripts after `make install` is typically\n`/usr/local/share/shournal/`, not `/usr/share/shournal/`.\n\n\nTo **uninstall**, after having installed with `make install`, you can\nexecute <br>\n`xargs rm < install_manifest.txt`, but see\n[here](https://stackoverflow.com/a/44649542/7015849) for the\nlimitations. <br>\nTo uninstall the kernel-module backend: <br>\n`sudo dkms remove shournalk/2.4` (adjust version as needed).\n"
  },
  {
    "path": "README-shell-integration.md",
    "content": "\n# Shell integration for shournal\n\n\n\n## Basic setup (interactive)\nAfter installation, to start observing your *interactive* shell-sessions\nappend the following to your shell's rc: <br>\n**~/.bashrc** <br>\n~~~\nHISTCONTROL=ignoredups:erasedups # NOT ALLOWED: ignorespace,ignoreboth\nsource /usr/share/shournal/SOURCE_ME.bash\nSHOURNAL_ENABLE\n~~~\n\n**~/.zshrc** <br>\n~~~\nsource /usr/share/shournal/SOURCE_ME.zsh\nSHOURNAL_ENABLE\n~~~\n\nLaunch a new shell afterwards and check whether it's working:\n~~~\n$ echo foo > bar\n$ shournal --query --wfile bar\ncmd-id 66075 $?=0 2021-11-02 14:23 $ echo foo > bar\nWorking directory: /home/tycho\nsession-uuid 3hIZtDwhEey5WPDVv9W/Cw==\n  1 written file:\n     /home/tycho/bar (4 bytes) Hash: 8087352826690557229\n$ # or just look into the history:\n$ shournal --query --history 3\n# ...\n~~~\n\n\nThe shell-integration injects code into\n`PROMPT_COMMAND`, `PS0` and `PS1` (bash) or the `preexec/precmd_functions`\n(zsh), so please do not overwrite those after having enabled shournal.\nFurther basic history functionality must be available, e.g. in bash\nHISTCONTROL must not ignore commands with leading spaces (see above).\nshournal's shell integration checks the typical variables and gives\nhints, if there is need for action.\n\nOther commands include <br>\n`SHOURNAL_DISABLE` to disable the observation <br>\n`SHOURNAL_PRINT_VERSIONS` to print the version of each component <br>\n`SHOURNAL_SET_VERBOSITY` to change the default verbosity (\"dbg, info,\nwarning, critical\"). For dbg, shournal must have been compiled with\ndebugging symbols. 
A verbosity higher than *warning* is not recommended.\n\n\n\n## Advanced setup (non-interactive)\nTo also observe non-interactive commands executed via ssh\n~~~\nssh localhost echo foo\n~~~\nor the *Sun Grid Engine* (SGE) the following setup is recommended:\n<br> <br>\n**bash** <br>\nPut the following near the **beginning** of your bashrc:\n~~~\nif [[ -n ${SGE_O_WORKDIR+x} || (\n     -n ${BASH_EXECUTION_STRING+x} &&\n    ( -n ${SSH_CLIENT+x} || -n ${SSH_TTY+x} )\n    ) ]]; then\n    source /usr/share/shournal/SOURCE_ME.bash\n    SHOURNAL_ENABLE\nfi\n~~~\nIn particular that code has to run before the sourcing of ~/.bashrc\nstops due to a negative interactive-check. For example, some distributions\nplace the following near the top of the bashrc:\n~~~\ncase $- in\n    *i*) ;;\n      *) return;;\nesac\n~~~\n\n**zsh** <br>\nPut the following into ~/.zprofile\n~~~\nif [[ -n ${SGE_O_WORKDIR+x} || (\n    -n ${ZSH_EXECUTION_STRING+x} &&\n    ( -n ${SSH_CLIENT+x} || -n ${SSH_TTY+x} )\n    ) ]]; then\n    source /usr/share/shournal/SOURCE_ME.zsh\n    SHOURNAL_ENABLE\nfi\n~~~\nNote that depending on your server environment, this requires zsh to be\nexecuted as login shell, e.g. <br>\n`ssh HOST zsh -l -c command`. Alternatively\nyou may use ~/.zshenv but beware that this file is always sourced, also\nduring `zsh -c ':'` invocations on the interactive command-line, so at least\nan additional check for <br>\n`[ $SHLVL -eq 1 ]` is recommended.\n\nFor cluster software systems other than SGE, you may\n`export SHOURNAL_IS_CLUSTERJOB=true`, before `SHOURNAL_ENABLE`, if and\nonly if the shell is about to execute a cluster job. Note\nthat in this case, shournal performs a re-execution of the current\ncommand and only returns control flow after flushing the database, because\ncluster software systems tend to kill background processes, once the\nmain job script finished. 
To totally disable cluster job detection,\nset `SHOURNAL_NO_CLUSTER_JOB_DETECT=true` before `SHOURNAL_ENABLE`.\n\n\n## Prerequisites of the fanotify backend\nIf the *fanotify* backend is used, please ensure the following:\n* The shell must be linked dynamically against (g)libc (default case,\n  can be tested e.g. with <br>\n  `file $(which bash) | grep \"dynamically linked\"` ).\n* Sourcing of SOURCE_ME.$shell must be within the shell's rc-file.\n* `SHOURNAL_ENABLE` should be within the shell's rc, because\n  on the very first enable the shell is re-executed, purging all non-exported\n  variables.\n* For non-interactive commands `SHOURNAL_ENABLE` must be called\n  before the actual execution begins.\n\nNote that the kernel module backend does not have those prerequisites\nand should be preferred in most cases.\n\n\n\n## Updates\nIf the shell-integration is running while shournal is updated, it is recommended,\nto restart your shell. A more elegant way than logout-login might be to `exec` your $shell.\n\n\n\n## FAQ\n* **How to obtain the value of variables?**. <br>\n  If shell-variables are used within a command, shournal's reports might\n  not seem to be very helpful. However, the shell-integration assigns\n  each shell-session a unique identifier (uuid).\n  In the likely case that the variable was\n  assigned *during that session*, you might be able to obtain its value.\n  This of course only works, if SHOURNAL_ENABLE was called, *before*\n  a variable was assigned. Example: <br>\n  `shournal --query --shell-session-id 'L/932KZTEemRB/dOGB9LOA==' | grep var_name`\n* **What about new, nested shell-sessions**? <br>\n  By *new shell-sessions* it is meant to call e.g. `bash` within an already\n  running bash-process. What happens next really depends on whether the\n  shell is itself **observed** by shournal or not (e.g. whether\n  `SHOURNAL_ENABLE` is within the .bashrc or not). 
On calling\n  `SHOURNAL_ENABLE` file-events are then considered to belong to the\n  new shell-session and are no longer reported to the original\n  observation-process of the caller. If a **non-observed** shell\n  is called, shournal's later report will not be very helpful: all\n  file-modifications caused by that process will yield the plain\n  shell-command (and not individual commands possibly entered\n  within the new shell session).\n\n\n\n## Limitations\n* File-operations (redirections) which spread over **multiple** command-sequences\n  within the **interactive shell** might lead to surprising (*kernel module backend*)\n  or incorrect (*fanotify backend*) results. <br>\n  Example:\n  ~~~\n  $ exec 3> /tmp/foo  # open fd 3\n  $ echo \"test\" >&3\n  $ exec 3>&-  # close fd 3.\n  ~~~\n  In case of the *kernel module backend* as usual the close event is\n  tracked, however `shournal -q -wf /tmp/foo` prints only the command\n  `exec 3>&-`. By using the shell-session uuid it should be possible\n  to reconstruct those cases. <br>\n  In case of the *fanotify backend* the close-event is lost.\n* **Additional limitations of the fanotify-backend**: <br>\n  Filesystem-events of asynchronously launched processes, which close the inherited\n  shournal-socket, might be lost, because an external shournal-run process\n  waits until all instances of that socket are closed.\n  Steps to reproduce: In an *observed* shell-session enter <br>\n  `bash -c 'eval \"exec $_SHOURNAL_SOCKET_NB>&-\"; sleep 1; echo foo > bar' &` <br>\n  Note that e.g. 
in *Python* processes launched via its\n  *subprocess*-module do not inherit file descriptors by default.\n  There seems to be no general solution to this problem, but in most cases\n  there should be some mechanism to wait for the processes to finish,\n  within the interactive shell-session or a script.\n* For further limitations please also read the general\n  [README](/../../).\n\n\n\n## Motivation\n\nFor a general introduction about the data and meta-data *shournal* stores\nplease visit the general [README](/../../).\n\nHaving to type *shournal* before every single command one wants\nto observe can be tiresome. Another typing-overhead would be\nintroduced by using pipes or redirections.\nConsider the following **broken** example:\n\n    shournal --exec echo hi > foo    # Don't do this.\n\nAs many shell users know the redirection applies to the whole command,\nwhile shournal itself only observes \"echo hi\". The file modification event ('hi'\n  written to 'foo')\nis hence **not** tracked by shournal.\nTo actually observe such a command\none must rather type\n\n    shournal --exec sh -c 'echo hi > foo'\n\nThat's annoying, right?\n\nTherefore before observing one or multiple commands,\n`source` the respective integration-file within your shell's rc\n(e.g. .bashrc) and type\n\n    SHOURNAL_ENABLE\n\nThat's (almost) all. Forget about *shournal* until needed\n( e.g. you want to know how a certain file was created).\n\n\n"
  },
  {
    "path": "README.md",
    "content": "\n\n![shournal logo](images/shournal.png)\n\n## A (file-) journal for your shell\n\n**Log shell-commands and used files. Snapshot executed scripts. Fully automatic.** <br>\n*There are two kinds of people: those who backup, and those who have never\n lost their data.* <br>\n\n\n~~~\n$ SHOURNAL_ENABLE\n$ cat demo.sh\n#!/usr/bin/env bash\necho hi | tee out.log\n$ ./demo.sh\nhi\n$ shournal -q --wfile out.log\ncmd-id 2 $?=0 2022-11-08 08:46 $ ./demo.sh\nWorking directory: /home/user\n  1 written file:\n     /home/user/out.log (3 bytes) Hash: 15349503233279147316\n  1 read file:\n     /home/user/demo.sh (42 bytes) Hash: 13559791986335963073 id 1\n          #!/usr/bin/env bash\n          echo hi | tee out.log\n~~~\n\n***shournal* records that `out.log` was written by the command `./demo.sh` and\ncreated a backup of the script `demo.sh` because it was read by\nthe bash interpreter.**\n\n*shournal* does not guess the files - it asks the Linux kernel. It's fast enough,\nsee [Overhead](#overhead).\n\n\nAfter installation and easy setup of the\n[shell-integration](./README-shell-integration.md) the following questions\nmay be answered within seconds:\n* What files were modified, read or executed by a command? Or reverse: What shell-commands\n  were used to create/modify or read from a certain file?\n* You executed a script. What was the script-content by the time it was called?\n* The command read a config-file - which one, and what was in it?\n* The command ran for a long time - can a re-execution be avoided (s. `--stat`)?\n* What other commands were executed during the same shell-session?\n* What about working directory, command start- and end-time or the\n  exit status ($?) ?\n\n\nBesides output on the command-line in a human-readable format (or JSON)\nyou can export (parts of) your command-history into\na standalone html-file where it is displayed in an interactive\ntime-line-plot. Further miscellaneous statistics are displayed in\nbar-plots, e.g. 
the commands with most file-modifications.\n\nUsing the external software\n[shournal-to-snakemake]( https://github.com/snakemake/shournal-to-snakemake)\nan observed shell-command-series can be directly transformed into rules for\nthe [*Snakemake workflow engine*](https://github.com/snakemake/snakemake),\na tool to *create reproducible and scalable data analyses*.\n\n*shournal* runs on GNU/Linux or Microsoft Windows via\nthe Windows Subsystem for Linux (WSL) using its *fanotify* edition.\n\nFor a more formal description please also check out our paper <br>\nKirchner, T., Riege, K. & Hoffmann, S. *Bashing irreproducibility with shournal*.\nSci Rep 14, 4872 (2024). https://doi.org/10.1038/s41598-024-53811-9\n\n<br>\n\n![Example session animation](images/shournal-example-session.gif)\n\n## Examples\nPlease note: below examples make use of the\n[shell-integration](./README-shell-integration.md). <br>\n\n* Create a file and ask shournal, how it came to be:\n  ~~~\n  $ SHOURNAL_ENABLE # monitor all commands using the shell-integration\n  $ echo hi > foo\n  $ shournal --query --wfile foo\n  cmd-id 1 $?=0 2019-05-14 10:19 $ echo hi > foo\n    1 written file:\n       /home/user/foo (3 bytes) Hash: 15349503233279147316\n\n  ~~~\n* shournal can be configured, to store *specific* read files, like shell-scripts,\n  within its database. Sometimes old script versions are of interest. 
Query\n  by **read filename** and optionally restore the files with `--restore-rfiles`:\n  ~~~\n  $ shournal -q --rname demo.sh\n  cmd-id 34 $?=0 2022-04-21 15:15 $ ./demo.sh\n    1 read file:\n       /home/user/demo.sh (34 bytes) Hash: 16696055267278105544 id 3\n            #!/usr/bin/env bash\n            echo version1\n  cmd-id 35 $?=0 2022-04-21 15:15 $ ./demo.sh\n    1 read file:\n       /home/user/demo.sh (34 bytes) Hash: 17683376525180966954 id 4\n            #!/usr/bin/env bash\n            echo version2\n  $ shournal -q --rname demo.sh --restore-rfiles # restore read files\n  ...\n  2 file(s) restored at /tmp/shournal-restore-user\n  ~~~\n* List all commands which contained the string `demo` (<kbd>%</kbd> is wildcard):\n  ~~~\n  $ shournal -q -cmdtxt %demo%\n  cmd-id 1 $?=0 2022-04-20 15:46 $ cat demo.sh\n  ...\n  cmd-id 2 $?=0 2022-04-20 15:46 $ ./demo.sh\n  ...\n  ~~~\n* Are input files up to date, such that re-execution of the command can be\n  avoided? Add `--stat` to the query, reporting current file statuses as\n  U (up to date), M (modified), N (not exist) ERROR (in case of an error) or NA\n  (not queried, only using json).\n  ~~~\n  $ cat foo > bar\n  $ shournal -q -wf bar --stat\n  cmd-id 1 $?=0 2025-02-10 11:38-11:38 $ cat foo > bar\n  ...\n  1 written file:\n     /home/tycho/bar (3 bytes) Hash: 15349503233279147316 U\n  1 read file:\n     /home/tycho/foo (3 bytes) Hash: 15349503233279147316 id 404002 U\n  ~~~\n  To query only for changed input files, execute\n  ~~~\n  shournal -q -wf bar --stat --output-format json | grep -F 'COMMAND:' | \\\n    sed -n 's/COMMAND://p' | \\\n    jq -r '.fileReadEvents | .[] | .status + \" \" + .path' | grep -v ^U\n  ~~~\n* What commands were executed at the current working directory?\n  ~~~\n  shournal --query -cwd \"$PWD\"\n  ~~~\n* What commands were executed within a specific shell-session? 
The\n  uuid can be taken from the command output of a previous query.\n  ~~~\n  shournal --query --shell-session-id $uuid\n  ~~~\n* Find out the value of a variable.\n  For instance, the command `echo $foo > bar` was executed in the shell-session\n  with id `puLvkEizEe6CgvXjQlmnIQ==`. If `foo` was set within that shell\n  session, its value can often be retrieved by\n  ~~~\n  shournal -q -sid puLvkEizEe6CgvXjQlmnIQ==  | fgrep 'foo='\n  ~~~\n* For the full list of query-options, please enter\n  ~~~\n  shournal --query --help\n  ~~~\n\nInstead of printing the `--query`-results to terminal, you can also create\nfancy html-plots, by appending `--output-format html -o out.html`.\nUse an ordinary web-browser for display.\n\n\n## Installation\n\n### Binary releases\nFor **Debian/Ubuntu-based** distributions .deb-packages are available on the\n[release-page](https://github.com/tycho-kirchner/shournal/releases/latest).\nThree different editions are provided for different use-cases: most users will\nwant to install *shournal* on a real host (or virtual machine) and\n*shournal-docker* [inside Docker](#running-inside-docker)\n(or another container platform).\n*shournal-fanotify* does not contain the kernel backend and\nis targeted at institutions where the usage of *out-of-tree kernel-modules*\nis discouraged. <br>\nOnly LTS-releases are officially supported, the packages are known to work\nfrom Debian 10 (Buster) and Ubuntu 18.04 (Bionic) onwards.\nBefore installing *shournal* including its kernel backend, make\nsure, the kernel headers are installed: <br>\n**Ubuntu**: `apt install linux-headers-generic` <br>\n**Ubuntu** with [HWE](https://askubuntu.com/questions/248914/what-is-hardware-enablement-hwe):\n`apt install linux-headers-generic-hwe-$(lsb_release -rs)` <br>\n**Debian**: `apt install linux-headers-$(dpkg --print-architecture)` <br>\nInstall deb-packages as usual, e.g. 
<br>\n`sudo apt install ./shournal_2.2_amd64.deb` <br>\nTo enable the shell-integration:\n* for *bash*: put the following to the end of your ~/.bashrc <br>\n`source /usr/share/shournal/SOURCE_ME.bash` <br>\n* for *zsh*: put the following to the end of your ~/.zshrc <br>\n`source /usr/share/shournal/SOURCE_ME.zsh` <br>\nand run `SHOURNAL_ENABLE` afterwards.\n\nFor **any Linux**, a flat binary is available on the\n[release-page](https://github.com/tycho-kirchner/shournal/releases/latest)\nto be used without installation:\n\n~~~\ntar -xf shournal-fanotify*.tar.xz\ncd shournal-fanotify/\nsudo groupadd shournalmsenter\nsudo chown root shournal-run-fanotify && sudo chmod u+s shournal-run-fanotify\n./shournal-run-fanotify -e echo Hello World\n\n# Source shournal's shell integration from bashrc/zshrc, e.g.\n# echo \"source '$PWD/SOURCE_ME.bash'\" >> ~/.bashrc\n# echo \"source '$PWD/SOURCE_ME.zsh'\" >> ~/.zshrc\n# Enable with: SHOURNAL_ENABLE.\n~~~\n\nAn **update** of *shournal* should be performed after all users have\nlogged out, because the shell integrations need to be resourced.\nFurther in case of the *kernel module* backend unloading the old\nversion stops all running observations.\n\n**After installation**:\nDepending on your distribution, additional steps might be necessary to\nenable the (recommended) uuidd-daemon. If systemd is in use, one\nmay need to:\n\n    systemctl enable uuidd\n    systemctl start uuidd\n\n\nAdd yourself or other users to the group *shournalk*: <br>\n`sudo adduser $USER shournalk` (relogin to take effect). <br>\nYou may override this group:\n~~~\nmkdir -p /etc/shournal.d/\necho GROUPNAME > /etc/shournal.d/kgroup\n~~~\nreplacing GROUPNAME with the value of your choice. This rule takes\neffect the next time shournal's kernel module is loaded ( so\ncall e.g. 
`modprobe -r shournalk; modprobe shournalk` or reboot).\n\nMore details and advanced options (logging commands executed via ssh)\ncan be found [here](./README-shell-integration.md).\n\n\n### Compile and install from source\nPlease refer to the instructions found within the\n[compile-README](./README-compile.md).\n\n\n\n## FAQ\n* **Does shournal track file rename/move operations?** <br>\n  No, but most often it should not be a problem. Using the\n  `--wfile` commandline-query-option, shournal finds the stored command\n  by content (size, hash) and mtime, not by its name.\n  For the name, `--wname` can be used.\n  More concrete:\n  ~~~\n  shournal --exec sh -c 'echo foo > bar; mv bar bar_old'\n  ~~~\n  Querying for bar_old by content (`--wfile`-option) yields exactly\n  the given command, however, `--wname bar_old` does **not** work\n  (`--wname bar` of course works). To use the bar_old *file name*\n  (and not content) as basis for a successful query, in this case\n  `--command-text -like '%bar_old%'` can be used.\n* **What happens to an appended file?** <br>\n  How to get a \"modification history\"?\n  Please read above rename/move-text first.\n  Appending to a file is currently handled as if a new one was created -\n  only the last command, which modified a given file can be found with\n  good certainty (by file **content**).\n  However, querying by path/file**name** works.\n  If the file was appended *and* renamed, things get more complicated.\n* **To track files, they can be hashed. Is that slow for big files?** <br>\n  No, because per default only certain small parts of the file are hashed.\n* **What does the following message mean and how to get rid of it?**: <br>\n  `fanotify_mark: failed to add path /foobar ... 
Permission denied`.\n  This message might be printed on executing a command with shournal.\n  Most probably the administrator mounted a filesystem object for which you don't have\n  permissions, thus you cannot *monitor* file events.\n  In this case you cannot perform file operations at this path\n  anyway, so it should be safe to silence this warning by adding the\n  path within the config-file in section `[mounts]`. If you want to ignore all\n  fanotify_mark permission errors, you can set the flag in section\n  `[mounts]`:\n  ~~~\n  [mounts]\n  ignore_no_permission = true\n  ~~~\n\n## Configuration\nshournal stores a self-documenting config-file typically at\n~/.config/shournal\nwhich is created on first run. It can be edited either directly with\na plain text editor or via `--edit-cfg`.\nFor completeness, the most important points are listed here as well.\n* Write- and read events can be configured, so only events occurring at\n  specific (include-)paths are stored. Put each path into a separate\n  line, all paths being enclosed\n  by triple quotes:\n  ~~~\n  include_paths = '''\n    /home/me\n    /media\n  '''\n  ~~~\n  Each exclude_path should be a sub-path of an include path.\n* Note that by default, there\n  is a limit on the number of logged events per command (max_event_count).\n  Read files (e.g. scripts) can **further** be configured\n  to be stored within shournal's database.\n  Files are only stored, if the configured max. file-size, file extension\n  (e.g. sh) and mimetype (e.g. 
application/x-shellscript) matches.\n  To find a mimetype for a given file you should use <br>\n  `shournal --print-mime test.sh`.\n  The correspondence of mimetype and file extension\n  is explained in more detail within the config-file.\n  Further, at your wish, read files are only stored if *you* have write permission for them\n  (not only read) - often system-provided scripts (owned by root) are not of particular\n  interest.\n\n  shournal will not store more read files per command, than max_count_of_files.\n  Matching files coming first have precedence.\n\n\n## Running inside Docker\nTo use *shournal* within Docker (or another container platform),\ndepending on the backend the following steps are necessary: <br>\n**kernel module backend** <br>\nInstall *shournal* on the host and *shournal-docker* inside the container.\nFor *unprivileged* containers *sysfs* is mounted readonly. In this case\ncreate a bindmount from /sys/kernel/shournalk_root to\n/tmp/shournalk-sysfs, e.g. <br>\n`docker run ... -v /sys/kernel/shournalk_root:/tmp/shournalk-sysfs`.\n\n**fanotify backend** <br>\nInstall *shournal-docker* (or *shournal-fanotify*) inside docker.\nFor *unprivileged* containers the capabilities SYS_ADMIN, SYS_PTRACE and\nSYS_NICE are required, e.g. <br>\n`docker run ... --cap-add SYS_ADMIN --cap-add SYS_PTRACE --cap-add SYS_NICE`. <br>\nYou may need to [configure the backend](#backend-configuration).\n\n\n## Running on a Amazon AWS EC2 instance\nIn order to run *shournal* on a Amazon AWS EC2 instance it may be\nnecessary, to enable additional software package repositories. 
For\nUbuntu 22.04 on a t3.micro instance enter the following commands before\ninstalling *shournal*\n~~~\nsudo add-apt-repository universe\nsudo apt update\n~~~\n\n\n## Backend configuration\nshournal provides two backends, a custom *kernel module* and *fanotify*.\nThe *kernel module* is used by default, except the *shournal-fanotify*\nedition is installed, where only the *fanotify* backend is\navailable. In general it is recommended to stick with the *kernel module*\nas it is faster and has less interference with the process environment -\nfor example no new mount namespaces have to be created and no file\ndescriptor inheritance is necessary to wait for the end of a process\ntree. See also:\n[shell-integration](./README-shell-integration.md#limitations). <br>\nIf both backends are installed you may configure the default one globally\nby creating the file `/etc/shournal.d/backend` or for each user by creating\n`~/.config/shournal/backend` with content `ko` or `fanotify`.\n\n\n## Disk-space - get rid of obsolete file-events\nDepending on the file-activity of the observed commands, shournal's\ndatabase will sooner or later grow. When you feel that enough time\nhas passed and want to get rid of old events, this can be done by e.g.\n`shournal --delete --older-than 1y`\nwhich deletes all commands (and file-events) older than one year.\nMore options are available, see also\n`shournal --delete --help`\n\n\n## Remote file-systems\n* *shournal* is able to monitor file events of specific processes (PID's).\n  Therefore, remote filesystems such as NFS or sshfs can be observed as\n  long as *shournal* runs on the same (virtual) machine as the observed\n  process. 
Consequently file events *another kernel* performs are lost.\n* For sshfs in case of the *fanotify* backend it is necessary,\n  to add ```-o allow_root``` to the sshfs-options,\n  otherwise permission errors during ```fanotify_mark``` are raised.\n  See also: https://serverfault.com/a/188896\n\n\n## Security\n### kernel-module backend\nIn the kernel module it is ensured that each user is only allowed to\nmonitor his/her own processes. Further, the kernel thread, which processes\nfile events, runs with effective caller credentials and checks\nallowed accesses on a per-file basis. Memory allocations are cgroup-aware,\neven for reading (in case of hashing) and writing (in case of logging)\nfiles.\n\n### fanotify backend\n*shournal-run-fanotify* is a so called \"setuid\"-program: whenever a regular user calls it, it runs\nwith root-permissions in the first place. As soon as possible, it runs effectively with user\npermissions though.\nIt must be setuid for two reaons:\n* fanotify requires root for initializing, because it is in\n  principle able, to **forbid** a process to access a file. shournal does not make use\n  of this feature so this is not a real security concern.\n* unsharing the *mount namespace* requires root, because setuid-programs *could* still refer\n  to seemingly obsolete mounts. This means that under awkward circumstances an unmount-event,\n  which has security-relevant consequences (e.g. 
mounting a new file to /etc/shadow) might not\n  propagate to processes which run in other mount namespaces.\n  To make sure mount-propagation applies, **all mounts, which carry setuid-binaries\n  or files they refer to, should be mounted *shared***, or no (security-relevant)\n  mount/unmount events should occur, after the first shournal-process started.\n  Shared mounts are the default in all recent distributions I know of.\n  See also\n  man 7 mount_namespaces and\n  [shared subtrees](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).\n\n\n\n\n\n## Limitations\nProcesses can communicate via IPC (inter-process-communication).\nIf the observed process *A* instructs the **not** observed process *B*\nvia IPC to modify a file, the filesystem-event is not registered by\n*shournal*.\n\nFor performance reasons, all files opened with write-permissions\nare reported as *written* by shournal, irrespective of whether\nthe process actually wrote to it. By using file size and content (hash)\nyou should be able to cover those cases.\n\nThe provided timestamp is determined shortly after a file was\nclosed. Note that it is possible that some other process has\nwritten to it in between. This however is only a\nproblem, if that other process was itself **not** observed.\n\nWhether memory mapped (see mmap(2) ) file-events are reported correctly\ndepends on **when** the underlying file-descriptor is closed. It is thus\napplication dependent and does not work in general.\n\n### Additional limitations of the fanotify backend\nThe file observation only works, if the process does not unshare the\nmount-namespace itself, e.g. 
monitoring a program started\nvia *flatpak* fails.\nFor further limitations please visit the fanotify manpage.\n\n\n## Known Issues\n* on NFS-storages: file events are lost, if the user does not have\n  read-permissions while a file is closed.\n  Steps to reproduce:\n  - open a file readable for you on a NFS storage\n  - chmod it 000\n  - close it --> the event is lost\n\n\n\n## How does it work?\nshournal attempts to deterministically associate files and shell-\ncommands without changing the users workflow. Under Linux file operations are\nperformed by the kernel, tracing these operations thus requires OS-level support.\nDuring the execution of a shell-command, shournal instruments the kernel to\ntrace files used by the shell-process and any of it’s descendant processes. More\nparticular, to keep the tracing-overhead low, only the closing of files is traced\nand (meta-)data collection starts afterwards in an asynchronous manner.\n\n**shournalk** as a kernel module runs directly in *kernel space* and is based on\n[tracepoints](https://www.kernel.org/doc/html/latest/trace/tracepoints.html)\nand the\n[ftrace-framework](https://www.kernel.org/doc/Documentation/trace/ftrace.txt)\nwhich basically allow for custom code to be run at certain kernel\nexecution paths without recompilation of the kernel itself. Only three\nevents are traced: closing of files, fork and exit. (Meta-)data collection\nalso takes place entirely in kernel space.\n\nThe **fanotify backend** employs the kernel-native\n[fanotify filesystem API]( https://man7.org/linux/man-pages/man7/fanotify.7.html)\nto register for close-events of whole\nmount-points which are isolated against unrelated\nprocesses using unshared\n[mount namespaces](https://man7.org/linux/man-pages/man7/mount_namespaces.7.html).\nshournal thereby ensures that all file-operations during the execution of a shell-\ncommand refer to the same, unique mount namespace. 
While the process-filtering\ntakes place in kernel space — so only file-events of observed processes\nare copied to user-space — the (meta-)data collection happens in user\nspace.\n\n\n## Overhead\nFile tracing imposes a **runtime overhead**.\nFor a detailed performance evaluation please refer to our\n[paper](https://doi.org/10.1038/s41598-024-53811-9) . In brief:\nWe measured the following command executions with shournal v2.9:\n* compile elfutils-0.176\n* git checkout — checkout the Linux kernel’s source code from v4.19 to v3.10.\n* kernel copy — cp of the 4.19 Linux source.\n\nThe relative runtime-overheads are shown in below table,\nstrace is listed for comparison with ptrace-based solutions:\n\n|    Backend    | compile | checkout |  cp   |\n| ------------- | ------- | -------- | ----- |\n| kernel module |  0.05%  |   0.49%  | 0.29% |\n| fanotify      |  1.2%   |   1.3%   | 6.2% |\n| (strace)      |  140%   |   41%    | 100%  |\n\nThe benchmark involves tracing, (meta-)data collection and saving to\na binary temporary file. As this file can be kept indefinitely, the\nfinal storing into the SQL-database is not part of the runtime-measurement.\n\nFor the `cp` benchmark, where ~120.000 file-events occurred\nin ~4 seconds, the runtime overhead of the fanotify backend may become\nnoticeable. Note that many file-events in short time constitute a\nworst-case. Where performance is critical, the kernel module backend\nshould be used.\n\nThe **storage overhead** largely depends on configuration, e.g. the number\nof stored scripts and file-metadata is limited by default, to avoid e.g.\na backup-script from flooding the database. For the cp-test\nthe average disk-usage per file-event is approx. 174 bytes which already\nincludes indexes to speed up queries. So one GiB of disk-space is\nsufficient for approx. 6 million events. 
Based on the experience of real-world\nusers the database is typically not larger than a few hundred megabytes\nafter months of usage.\n\n\n\n## Credits\nshournal makes use of great tools and libraries, most importantly the Qt-framework,\nxxhash, tsl::ordered_map and cmake and also the Linux-Kernel's *fanotify*.\nFor the html-plot d3js, jquery, popper.js, bootstrap, webpack\nand others are used.\n\nThanks to the developers!\n\nThe project arose in the Hoffmann Research\nGroup: Computational Biology of Aging\nat the Fritz Lipmann Institute in Jena (Germany).\nSpecial thanks to\nSteve&nbsp;Hoffmann\nand Konstantin&nbsp;Riege - without you this project\ncouldn't have been accomplished.\n\n\n# License\nThe whole project is licensed under the GPL, v3 or later\n(see LICENSE file for details) <br>\n**except**\n* The kernel module within `kernel/` which is licensed under\n  the GNU General Public License version 2 only.\n* The libraries within\n  `extern/` → Please refer to the licenses within their\n    respective directories.\n* The javascript-libraries in the auto-generated\n  `html-export/dist/main.js` → the licenses are\n  stored in `html-export/dist/main.licenses.txt`.\n\n\nCopyleft (C) 2021, Tycho Kirchner\n"
  },
  {
    "path": "cmake/FindShournalUtil.cmake",
    "content": "\n# Join a list of strings using seperator sep\n# and store the output in result.\nfunction(JOIN vals sep result)\n  string (REGEX REPLACE \"([^\\\\]|^);\" \"\\\\1${sep}\" _tmp_str \"${vals}\")\n  string (REGEX REPLACE \"[\\\\](.)\" \"\\\\1\" _tmp_str \"${_tmp_str}\")\n  set (${result} \"${_tmp_str}\" PARENT_SCOPE)\nendfunction()\n"
  },
  {
    "path": "extern/folly/LICENSE",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. 
However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n\nFiles in folly/external/farmhash licensed as follows\n\n    Copyright (c) 2014 Google, Inc.\n\n    Permission is hereby granted, free of charge, to any person obtaining a copy\n    of this software and associated documentation files (the \"Software\"), to deal\n    in the Software without restriction, including without limitation the rights\n    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n    copies of the Software, and to permit persons to whom the Software is\n    furnished to do so, subject to the following conditions:\n\n    The above copyright notice and this permission notice shall be included in\n    all copies or substantial portions of the Software.\n\n    THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n    THE SOFTWARE.\n"
  },
  {
    "path": "extern/folly/README.md",
    "content": "Folly: Facebook Open-source Library\n-----------------------------------\n\n[![Build Status](https://travis-ci.org/facebook/folly.svg?branch=master)](https://travis-ci.org/facebook/folly)\n\n### What is `folly`?\n\nFolly (acronymed loosely after Facebook Open Source Library) is a\nlibrary of C++14 components designed with practicality and efficiency\nin mind. **Folly contains a variety of core library components used extensively\nat Facebook**. In particular, it's often a dependency of Facebook's other\nopen source C++ efforts and place where those projects can share code.\n\nIt complements (as opposed to competing against) offerings\nsuch as Boost and of course `std`. In fact, we embark on defining our\nown component only when something we need is either not available, or\ndoes not meet the needed performance profile. We endeavor to remove\nthings from folly if or when `std` or Boost obsoletes them.\n\nPerformance concerns permeate much of Folly, sometimes leading to\ndesigns that are more idiosyncratic than they would otherwise be (see\ne.g. `PackedSyncPtr.h`, `SmallLocks.h`). Good performance at large\nscale is a unifying theme in all of Folly.\n\n### Logical Design\n\nFolly is a collection of relatively independent components, some as\nsimple as a few symbols. There is no restriction on internal\ndependencies, meaning that a given folly module may use any other\nfolly components.\n\nAll symbols are defined in the top-level namespace `folly`, except of\ncourse macros. Macro names are ALL_UPPERCASE and should be prefixed\nwith `FOLLY_`. Namespace `folly` defines other internal namespaces\nsuch as `internal` or `detail`. User code should not depend on symbols\nin those namespaces.\n\nFolly has an `experimental` directory as well. This designation connotes\nprimarily that we feel the API may change heavily over time. 
This code,\ntypically, is still in heavy use and is well tested.\n\n### Physical Design\n\nAt the top level Folly uses the classic \"stuttering\" scheme\n`folly/folly` used by Boost and others. The first directory serves as\nan installation root of the library (with possible versioning a la\n`folly-1.0/`), and the second is to distinguish the library when\nincluding files, e.g. `#include <folly/FBString.h>`.\n\nThe directory structure is flat (mimicking the namespace structure),\ni.e. we don't have an elaborate directory hierarchy (it is possible\nthis will change in future versions). The subdirectory `experimental`\ncontains files that are used inside folly and possibly at Facebook but\nnot considered stable enough for client use. Your code should not use\nfiles in `folly/experimental` lest it may break when you update Folly.\n\nThe `folly/folly/test` subdirectory includes the unittests for all\ncomponents, usually named `ComponentXyzTest.cpp` for each\n`ComponentXyz.*`. The `folly/folly/docs` directory contains\ndocumentation.\n\n### What's in it?\n\nBecause of folly's fairly flat structure, the best way to see what's in it\nis to look at the headers in [top level `folly/` directory](https://github.com/facebook/folly/tree/master/folly). You can also\ncheck the [`docs` folder](folly/docs) for documentation, starting with the\n[overview](folly/docs/Overview.md).\n\nFolly is published on GitHub at https://github.com/facebook/folly\n\n### Build Notes\n\n#### Dependencies\n\nfolly supports gcc (5.1+), clang, or MSVC. It should run on Linux (x86-32,\nx86-64, and ARM), iOS, macOS, and Windows (x86-64). The CMake build is only\ntested on some of these platforms; at a minimum, we aim to support macOS and\nLinux (on the latest Ubuntu LTS release or newer.)\n\nfolly requires a version of boost compiled with C++14 support.\n\ngoogletest is required to build and run folly's tests.  
You can download\nit from https://github.com/google/googletest/archive/release-1.8.0.tar.gz\nThe following commands can be used to download and install it:\n\n```\nwget https://github.com/google/googletest/archive/release-1.8.0.tar.gz && \\\ntar zxf release-1.8.0.tar.gz && \\\nrm -f release-1.8.0.tar.gz && \\\ncd googletest-release-1.8.0 && \\\ncmake . && \\\nmake && \\\nmake install\n```\n\n#### Finding dependencies in non-default locations\n\nIf you have boost, gtest, or other dependencies installed in a non-default\nlocation, you can use the `CMAKE_INCLUDE_PATH` and `CMAKE_LIBRARY_PATH`\nvariables to make CMAKE look also look for header files and libraries in\nnon-standard locations.  For example, to also search the directories\n`/alt/include/path1` and `/alt/include/path2` for header files and the\ndirectories `/alt/lib/path1` and `/alt/lib/path2` for libraries, you can invoke\n`cmake` as follows:\n\n```\ncmake \\\n  -DCMAKE_INCLUDE_PATH=/alt/include/path1:/alt/include/path2 \\\n  -DCMAKE_LIBRARY_PATH=/alt/lib/path1:/alt/lib/path2 ...\n```\n\n#### Building tests\n\nBy default, building the tests is disabled as part of the CMake `all` target.\nTo build the tests, specify `-DBUILD_TESTS=ON` to CMake at configure time.\n\n#### Ubuntu 16.04 LTS\n\nThe following packages are required (feel free to cut and paste the apt-get\ncommand below):\n\n```\nsudo apt-get install \\\n    g++ \\\n    cmake \\\n    libboost-all-dev \\\n    libevent-dev \\\n    libdouble-conversion-dev \\\n    libgoogle-glog-dev \\\n    libgflags-dev \\\n    libiberty-dev \\\n    liblz4-dev \\\n    liblzma-dev \\\n    libsnappy-dev \\\n    make \\\n    zlib1g-dev \\\n    binutils-dev \\\n    libjemalloc-dev \\\n    libssl-dev \\\n    pkg-config \\\n    libunwind-dev\n```\n\nFolly relies on [fmt](https://github.com/fmtlib/fmt) which needs to be installed from source.\nThe following commands will download, compile, and install fmt.\n\n```\ngit clone https://github.com/fmtlib/fmt.git && cd 
fmt\n\nmkdir _build && cd _build\ncmake ..\n\nmake -j$(nproc)\nsudo make install\n```\n\nIf advanced debugging functionality is required, use:\n\n```\nsudo apt-get install \\\n    libunwind8-dev \\\n    libelf-dev \\\n    libdwarf-dev\n```\n\nIn the folly directory (e.g. the checkout root or the archive unpack root), run:\n```\n  mkdir _build && cd _build\n  cmake ..\n  make -j $(nproc)\n  make install # with either sudo or DESTDIR as necessary\n```\n\n#### OS X (Homebrew)\n\nfolly is available as a Formula and releases may be built via `brew install folly`.\n\nYou may also use `folly/build/bootstrap-osx-homebrew.sh` to build against `master`:\n\n```\n  ./folly/build/bootstrap-osx-homebrew.sh\n```\n\nThis will create a build directory `_build` in the top-level.\n\n#### OS X (MacPorts)\n\nInstall the required packages from MacPorts:\n\n```\n  sudo port install \\\n    boost \\\n    cmake \\\n    gflags \\\n    git \\\n    google-glog \\\n    libevent \\\n    libtool \\\n    lz4 \\\n    lzma \\\n    openssl \\\n    snappy \\\n    xz \\\n    zlib\n```\n\nDownload and install double-conversion:\n\n```\n  git clone https://github.com/google/double-conversion.git\n  cd double-conversion\n  cmake -DBUILD_SHARED_LIBS=ON .\n  make\n  sudo make install\n```\n\nDownload and install folly with the parameters listed below:\n\n```\n  git clone https://github.com/facebook/folly.git\n  cd folly\n  mkdir _build\n  cd _build\n  cmake ..\n  make\n  sudo make install\n```\n\n#### Windows (Vcpkg)\n\nfolly is available in [Vcpkg](https://github.com/Microsoft/vcpkg#vcpkg) and releases may be built via `vcpkg install folly:x64-windows`.\n\nYou may also use `vcpkg install folly:x64-windows --head` to build against `master`.\n\n#### Other Linux distributions\n\n- double-conversion (https://github.com/google/double-conversion)\n\n  Download and build double-conversion.\n  You may need to tell cmake where to find it.\n\n  [double-conversion/] `ln -s src double-conversion`\n\n  [folly/] `mkdir 
build && cd build`\n  [folly/build/] `cmake \"-DCMAKE_INCLUDE_PATH=$DOUBLE_CONVERSION_HOME/include\" \"-DCMAKE_LIBRARY_PATH=$DOUBLE_CONVERSION_HOME/lib\" ..`\n\n  [folly/build/] `make`\n\n- additional platform specific dependencies:\n\n  Fedora >= 21 64-bit (last tested on Fedora 28 64-bit)\n    - gcc\n    - gcc-c++\n    - cmake\n    - automake\n    - boost-devel\n    - libtool\n    - lz4-devel\n    - lzma-devel\n    - snappy-devel\n    - zlib-devel\n    - glog-devel\n    - gflags-devel\n    - scons\n    - double-conversion-devel\n    - openssl-devel\n    - libevent-devel\n    - fmt-devel\n    - libsodium-devel\n    \n  Optional\n    - libdwarf-devel\n    - elfutils-libelf-devel\n    - libunwind-devel\n"
  },
  {
    "path": "extern/folly/UninitializedMemoryHacks.h",
    "content": "/*\n * Copyright (c) Facebook, Inc. and its affiliates.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <string>\n#include <type_traits>\n#include <vector>\n\nnamespace {\n// This struct is different in every translation unit.  We use template\n// instantiations to define inline freestanding methods.  Since the\n// methods are inline it is fine to define them in multiple translation\n// units, but the instantiation itself would be an ODR violation if it is\n// present in the program more than once.  By tagging the instantiations\n// with this struct, we avoid ODR problems for the instantiation while\n// allowing the resulting methods to be inline-able.  If you think that\n// seems hacky keep reading...\nstruct FollyMemoryDetailTranslationUnitTag {};\n} // namespace\nnamespace folly {\nnamespace detail {\ntemplate <typename T>\nvoid unsafeStringSetLargerSize(std::basic_string<T>& s, std::size_t n);\ntemplate <typename T>\nvoid unsafeVectorSetLargerSize(std::vector<T>& v, std::size_t n);\n} // namespace detail\n\n/*\n * This file provides helper functions resizeWithoutInitialization()\n * that can resize std::basic_string or std::vector without constructing\n * or initializing new elements.\n *\n * IMPORTANT: These functions can be unsafe if used improperly.  
If you\n * don't write to an element with index >= oldSize and < newSize, reading\n * the element can expose arbitrary memory contents to the world, including\n * the contents of old strings.  If you're lucky you'll get a segfault,\n * because the kernel is only required to fault in new pages on write\n * access.  MSAN should be able to catch problems in the common case that\n * the string or vector wasn't previously shrunk.\n *\n * Pay extra attention to your failure paths.  For example, if you try\n * to read directly into a caller-provided string, make sure to clear\n * the string when you get an I/O error.\n *\n * You should only use this if you have profiling data from production\n * that shows that this is not a premature optimization.  This code is\n * designed for retroactively optimizing code where touching every element\n * twice (or touching never-used elements once) shows up in profiling,\n * and where restructuring the code to use fixed-length arrays or IOBuf-s\n * would be difficult.\n *\n * NOTE: Just because .resize() shows up in your profile (probably\n * via one of the intrinsic memset implementations) doesn't mean that\n * these functions will make your program faster.  A lot of the cost\n * of memset comes from cache misses, so avoiding the memset can mean\n * that the cache miss cost just gets pushed to the following code.\n * resizeWithoutInitialization can be a win when the contents are bigger\n * than a cache level, because the second access isn't free in that case.\n * It can be a win when the memory is already cached, so touching it\n * doesn't help later code.  It can also be a win if the final length\n * of the string or vector isn't actually known, so the suffix will be\n * chopped off with a second call to .resize().\n */\n\n/**\n * Like calling s.resize(n), but when growing the string does not\n * initialize new elements.  
It is undefined behavior to read from\n * any element added to the string by this method unless it has been\n * written to by an operation that follows this call.\n *\n * Use the FOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT(T) macro to\n * declare (and inline define) the internals required to call\n * resizeWithoutInitialization for a std::basic_string<T>.\n * See detailed description of a similar macro for std::vector<T> below.\n *\n * IMPORTANT: Read the warning at the top of this header file.\n */\ntemplate <\n    typename T,\n    typename =\n        typename std::enable_if<std::is_trivially_destructible<T>::value>::type>\ninline void resizeWithoutInitialization(\n    std::basic_string<T>& s,\n    std::size_t n) {\n  if (n <= s.size()) {\n    s.resize(n);\n  } else {\n    // careful not to call reserve unless necessary, as it causes\n    // shrink_to_fit on many platforms\n    if (n > s.capacity()) {\n      s.reserve(n);\n    }\n    detail::unsafeStringSetLargerSize(s, n);\n  }\n}\n\n/**\n * Like calling v.resize(n), but when growing the vector does not construct\n * or initialize new elements.  It is undefined behavior to read from any\n * element added to the vector by this method unless it has been written\n * to by an operation that follows this call.\n *\n * Use the FOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT(T) macro to\n * declare (and inline define) the internals required to call\n * resizeWithoutInitialization for a std::vector<T>.  This must\n * be done exactly once in each translation unit that wants to call\n * resizeWithoutInitialization(std::vector<T>&,size_t).  char and unsigned\n * char are provided by default.  If you don't do this you will get linker\n * errors about folly::detail::unsafeVectorSetLargerSize.  Requiring that\n * T be trivially_destructible is only an approximation of the property\n * required of T.  
In fact what is required is that any random sequence of\n * bytes may be safely reinterpreted as a T and passed to T's destructor.\n *\n * std::vector<bool> has specialized internals and is not supported.\n *\n * IMPORTANT: Read the warning at the top of this header file.\n */\ntemplate <\n    typename T,\n    typename = typename std::enable_if<\n        std::is_trivially_destructible<T>::value &&\n        !std::is_same<T, bool>::value>::type>\nvoid resizeWithoutInitialization(std::vector<T>& v, std::size_t n) {\n  if (n <= v.size()) {\n    v.resize(n);\n  } else {\n    if (n > v.capacity()) {\n      v.reserve(n);\n    }\n    detail::unsafeVectorSetLargerSize(v, n);\n  }\n}\n\nnamespace detail {\n\n// This machinery bridges template expansion and macro expansion\n#define FOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT_IMPL(TYPE)                    \\\n  namespace folly {                                                            \\\n  namespace detail {                                                           \\\n  void unsafeStringSetLargerSizeImpl(std::basic_string<TYPE>& s, std::size_t); \\\n  template <>                                                                  \\\n  inline void unsafeStringSetLargerSize<TYPE>(                                 \\\n      std::basic_string<TYPE> & s,                                             \\\n      std::size_t n) {                                                         \\\n    unsafeStringSetLargerSizeImpl(s, n);                                       \\\n  }                                                                            \\\n  }                                                                            \\\n  }\n\n#if defined(_LIBCPP_STRING)\n// libc++\n\ntemplate <typename Tag, typename T, typename A, A Ptr__set_size>\nstruct MakeUnsafeStringSetLargerSize {\n  friend void unsafeStringSetLargerSizeImpl(\n      std::basic_string<T>& s,\n      std::size_t n) {\n    // s.__set_size(n);\n    (s.*Ptr__set_size)(n);\n   
 (&s[0])[n] = '\\0';\n  }\n};\n\n#define FOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT(TYPE)            \\\n  template void std::basic_string<TYPE>::__set_size(std::size_t); \\\n  template struct folly::detail::MakeUnsafeStringSetLargerSize<   \\\n      FollyMemoryDetailTranslationUnitTag,                        \\\n      TYPE,                                                       \\\n      void (std::basic_string<TYPE>::*)(std::size_t),             \\\n      &std::basic_string<TYPE>::__set_size>;                      \\\n  FOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT_IMPL(TYPE)\n\n#elif defined(_GLIBCXX_STRING) && _GLIBCXX_USE_CXX11_ABI\n// libstdc++ new implementation with SSO\n\ntemplate <typename Tag, typename T, typename A, A Ptr_M_set_length>\nstruct MakeUnsafeStringSetLargerSize {\n  friend void unsafeStringSetLargerSizeImpl(\n      std::basic_string<T>& s,\n      std::size_t n) {\n    // s._M_set_length(n);\n    (s.*Ptr_M_set_length)(n);\n  }\n};\n\n#define FOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT(TYPE)               \\\n  template void std::basic_string<TYPE>::_M_set_length(std::size_t); \\\n  template struct folly::detail::MakeUnsafeStringSetLargerSize<      \\\n      FollyMemoryDetailTranslationUnitTag,                           \\\n      TYPE,                                                          \\\n      void (std::basic_string<TYPE>::*)(std::size_t),                \\\n      &std::basic_string<TYPE>::_M_set_length>;                      \\\n  FOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT_IMPL(TYPE)\n\n#elif defined(_GLIBCXX_STRING)\n// libstdc++ old implementation\n\ntemplate <\n    typename Tag,\n    typename T,\n    typename A,\n    A Ptr_M_rep,\n    typename B,\n    B Ptr_M_set_length_and_sharable>\nstruct MakeUnsafeStringSetLargerSize {\n  friend void unsafeStringSetLargerSizeImpl(\n      std::basic_string<T>& s,\n      std::size_t n) {\n    // s._M_rep()->_M_set_length_and_sharable(n);\n    auto rep = (s.*Ptr_M_rep)();\n    
(rep->*Ptr_M_set_length_and_sharable)(n);\n  }\n};\n\n#define FOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT(TYPE)                      \\\n  template std::basic_string<TYPE>::_Rep* std::basic_string<TYPE>::_M_rep() \\\n      const;                                                                \\\n  template void std::basic_string<TYPE>::_Rep::_M_set_length_and_sharable(  \\\n      std::size_t);                                                         \\\n  template struct folly::detail::MakeUnsafeStringSetLargerSize<             \\\n      FollyMemoryDetailTranslationUnitTag,                                  \\\n      TYPE,                                                                 \\\n      std::basic_string<TYPE>::_Rep* (std::basic_string<TYPE>::*)() const,  \\\n      &std::basic_string<TYPE>::_M_rep,                                     \\\n      void (std::basic_string<TYPE>::_Rep::*)(std::size_t),                 \\\n      &std::basic_string<TYPE>::_Rep::_M_set_length_and_sharable>;          \\\n  FOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT_IMPL(TYPE)\n\n#elif defined(_MSC_VER)\n// MSVC\n\ntemplate <typename Tag, typename T, typename A, A Ptr_Eos>\nstruct MakeUnsafeStringSetLargerSize {\n  friend void unsafeStringSetLargerSizeImpl(\n      std::basic_string<T>& s,\n      std::size_t n) {\n    // _Eos method is public for _MSC_VER <= 1916, private after\n    // s._Eos(n);\n    (s.*Ptr_Eos)(n);\n  }\n};\n\n#define FOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT(TYPE)          \\\n  template void std::basic_string<TYPE>::_Eos(std::size_t);     \\\n  template struct folly::detail::MakeUnsafeStringSetLargerSize< \\\n      FollyMemoryDetailTranslationUnitTag,                      \\\n      TYPE,                                                     \\\n      void (std::basic_string<TYPE>::*)(std::size_t),           \\\n      &std::basic_string<TYPE>::_Eos>;                          \\\n  FOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT_IMPL(TYPE)\n\n#else\n#warning \\\n    \"No 
implementation for resizeWithoutInitialization of std::basic_string\"\n#endif\n\n} // namespace detail\n} // namespace folly\n\n#if defined(FOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT)\nFOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT(char)\nFOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT(wchar_t)\n#endif\n\nnamespace folly {\nnamespace detail {\n\n// This machinery bridges template expansion and macro expansion\n#define FOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT_IMPL(TYPE)              \\\n  namespace folly {                                                      \\\n  namespace detail {                                                     \\\n  void unsafeVectorSetLargerSizeImpl(std::vector<TYPE>& v, std::size_t); \\\n  template <>                                                            \\\n  inline void unsafeVectorSetLargerSize<TYPE>(                           \\\n      std::vector<TYPE> & v,                                             \\\n      std::size_t n) {                                                   \\\n    unsafeVectorSetLargerSizeImpl(v, n);                                 \\\n  }                                                                      \\\n  }                                                                      \\\n  }\n\n#if defined(_LIBCPP_VECTOR)\n// libc++\n\ntemplate <\n    typename Tag,\n    typename T,\n    typename A,\n    A Ptr__end_,\n    typename B,\n    B Ptr__annotate_contiguous_container_>\nstruct MakeUnsafeVectorSetLargerSize {\n  friend void unsafeVectorSetLargerSizeImpl(std::vector<T>& v, std::size_t n) {\n    // v.__end_ += (n - v.size());\n    using Base = std::__vector_base<T, std::allocator<T>>;\n    static_assert(\n        std::is_standard_layout<std::vector<T>>::value &&\n            sizeof(std::vector<T>) == sizeof(Base),\n        \"reinterpret_cast safety conditions not met\");\n    const auto old_size = v.size();\n    reinterpret_cast<Base&>(v).*Ptr__end_ += (n - v.size());\n\n    // libc++ contiguous containers use special 
annotation functions that help\n    // the address sanitizer to detect improper memory accesses. When ASAN is\n    // enabled we need to call the appropriate annotation functions in order to\n    // stop ASAN from reporting false positives. When ASAN is disabled, the\n    // annotation function is a no-op.\n    (v.*Ptr__annotate_contiguous_container_)(\n        v.data(),\n        v.data() + v.capacity(),\n        v.data() + old_size,\n        v.data() + v.size());\n  }\n};\n\n#define FOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT(TYPE)               \\\n  template struct folly::detail::MakeUnsafeVectorSetLargerSize<      \\\n      FollyMemoryDetailTranslationUnitTag,                           \\\n      TYPE,                                                          \\\n      TYPE*(std::__vector_base<TYPE, std::allocator<TYPE>>::*),      \\\n      &std::vector<TYPE>::__end_,                                    \\\n      void (std::vector<TYPE>::*)(                                   \\\n          const void*, const void*, const void*, const void*) const, \\\n      &std::vector<TYPE>::__annotate_contiguous_container>;          \\\n  FOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT_IMPL(TYPE)\n\n#elif defined(_GLIBCXX_VECTOR)\n// libstdc++\n\ntemplate <\n    typename Tag,\n    typename T,\n    typename A,\n    A Ptr_M_impl,\n    typename B,\n    B Ptr_M_finish>\nstruct MakeUnsafeVectorSetLargerSize : std::vector<T> {\n  friend void unsafeVectorSetLargerSizeImpl(std::vector<T>& v, std::size_t n) {\n    // v._M_impl._M_finish += (n - v.size());\n    (v.*Ptr_M_impl).*Ptr_M_finish += (n - v.size());\n  }\n};\n\n#define FOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT(TYPE)          \\\n  template struct folly::detail::MakeUnsafeVectorSetLargerSize< \\\n      FollyMemoryDetailTranslationUnitTag,                      \\\n      TYPE,                                                     \\\n      decltype(&std::vector<TYPE>::_M_impl),                    \\\n      &std::vector<TYPE>::_M_impl,           
                   \\\n      decltype(&std::vector<TYPE>::_Vector_impl::_M_finish),    \\\n      &std::vector<TYPE>::_Vector_impl::_M_finish>;             \\\n  FOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT_IMPL(TYPE)\n\n#elif defined(_MSC_VER) && _MSC_VER <= 1916\n// MSVC <= VS2017\n\ntemplate <typename Tag, typename T>\nstruct MakeUnsafeVectorSetLargerSize : std::vector<T> {\n  friend void unsafeVectorSetLargerSizeImpl(std::vector<T>& v, std::size_t n) {\n    v._Mylast() += (n - v.size());\n  }\n};\n\n#define FOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT(TYPE)          \\\n  template struct folly::detail::MakeUnsafeVectorSetLargerSize< \\\n      FollyMemoryDetailTranslationUnitTag,                      \\\n      TYPE>;                                                    \\\n  FOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT_IMPL(TYPE)\n\n#elif defined(_MSC_VER) && _MSC_VER > 1916\n// MSVC >= VS2019\n\ntemplate <\n    typename Tag,\n    typename T,\n    typename A,\n    A Ptr_Mypair,\n    typename B,\n    B Ptr_Myval2,\n    typename C,\n    C Ptr_Mylast>\nstruct MakeUnsafeVectorSetLargerSize : std::vector<T> {\n  friend void unsafeVectorSetLargerSizeImpl(std::vector<T>& v, std::size_t n) {\n    // v._Mypair._Myval2._Mylast += (n - v.size());\n    ((v.*Ptr_Mypair).*Ptr_Myval2).*Ptr_Mylast += (n - v.size());\n  }\n};\n\n#define FOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT(TYPE)                         \\\n  template struct folly::detail::MakeUnsafeVectorSetLargerSize<                \\\n      FollyMemoryDetailTranslationUnitTag,                                     \\\n      TYPE,                                                                    \\\n      decltype(&std::vector<TYPE>::_Mypair),                                   \\\n      &std::vector<TYPE>::_Mypair,                                             \\\n      decltype(&decltype(std::declval<std::vector<TYPE>>()._Mypair)::_Myval2), \\\n      &decltype(std::declval<std::vector<TYPE>>()._Mypair)::_Myval2,           \\\n      
decltype(&decltype(                                                      \\\n          std::declval<std::vector<TYPE>>()._Mypair._Myval2)::_Mylast),        \\\n      &decltype(std::declval<std::vector<TYPE>>()._Mypair._Myval2)::_Mylast>;  \\\n  FOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT_IMPL(TYPE)\n\n#else\n#warning \"No implementation for resizeWithoutInitialization of std::vector\"\n#endif\n\n} // namespace detail\n} // namespace folly\n\n#if defined(FOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT)\nFOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT(char)\nFOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT(unsigned char)\n#endif\n"
  },
  {
    "path": "extern/tsl-ordered-map/CMakeLists.txt",
    "content": "\nadd_library(lib_orderedmap\n  ordered_hash.h\n  ordered_map.h\n  ordered_set.h\n)\n\nset_target_properties(lib_orderedmap PROPERTIES LINKER_LANGUAGE CXX)\n\ntarget_link_libraries(lib_orderedmap\n    Qt5::Core\n)\n"
  },
  {
    "path": "extern/tsl-ordered-map/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2017 Tessil\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "extern/tsl-ordered-map/ordered_hash.h",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Tessil\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#ifndef TSL_ORDERED_HASH_H\n#define TSL_ORDERED_HASH_H\n\n\n#include <algorithm>\n#include <cassert>\n#include <climits>\n#include <cmath>\n#include <cstddef>\n#include <cstdint>\n#include <exception>\n#include <functional>\n#include <iterator>\n#include <limits>\n#include <memory>\n#include <stdexcept>\n#include <tuple>\n#include <type_traits>\n#include <utility>\n#include <vector>\n\n\n/**\n * Macros for compatibility with GCC 4.8\n */\n#if (defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9))\n#    define TSL_OH_NO_CONTAINER_ERASE_CONST_ITERATOR\n#    define TSL_OH_NO_CONTAINER_EMPLACE_CONST_ITERATOR\n#endif\n\n/**\n * Only activate tsl_oh_assert if TSL_DEBUG is defined. 
\n * This way we avoid the performance hit when NDEBUG is not defined with assert as tsl_oh_assert is used a lot\n * (people usually compile with \"-O3\" and not \"-O3 -DNDEBUG\").\n */\n#ifdef TSL_DEBUG\n#    define tsl_oh_assert(expr) assert(expr)\n#else\n#    define tsl_oh_assert(expr) (static_cast<void>(0))\n#endif\n\n/**\n * If exceptions are enabled, throw the exception passed in parameter, otherwise call std::terminate.\n */\n#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || (defined (_MSC_VER) && defined (_CPPUNWIND))) && !defined(TSL_NO_EXCEPTIONS)\n#    define TSL_OH_THROW_OR_TERMINATE(ex, msg) throw ex(msg)\n#else\n#    define TSL_OH_NO_EXCEPTIONS\n#    ifdef NDEBUG\n#        define TSL_OH_THROW_OR_TERMINATE(ex, msg) std::terminate()\n#    else\n#        include <iostream>\n#        define TSL_OH_THROW_OR_TERMINATE(ex, msg) do { std::cerr << msg << std::endl; std::terminate(); } while(0)\n#    endif\n#endif\n\n\nnamespace tsl {\n\nnamespace detail_ordered_hash {\n    \ntemplate<typename T>\nstruct make_void {\n    using type = void;\n};\n\ntemplate<typename T, typename = void>\nstruct has_is_transparent: std::false_type {\n};\n\ntemplate<typename T>\nstruct has_is_transparent<T, typename make_void<typename T::is_transparent>::type>: std::true_type {\n};\n\n\ntemplate<typename T, typename = void>\nstruct is_vector: std::false_type {\n};\n\ntemplate<typename T>\nstruct is_vector<T, typename std::enable_if<\n                        std::is_same<T, std::vector<typename T::value_type, typename T::allocator_type>>::value\n                    >::type>: std::true_type {\n};\n\ntemplate<typename T, typename U>\nstatic T numeric_cast(U value, const char* error_message = \"numeric_cast() failed.\") {\n    T ret = static_cast<T>(value);\n    if(static_cast<U>(ret) != value) {\n        TSL_OH_THROW_OR_TERMINATE(std::runtime_error, error_message);\n    }\n    \n    const bool is_same_signedness = (std::is_unsigned<T>::value && std::is_unsigned<U>::value) ||\n 
                                   (std::is_signed<T>::value && std::is_signed<U>::value);\n    if(!is_same_signedness && (ret < T{}) != (value < U{})) {\n        TSL_OH_THROW_OR_TERMINATE(std::runtime_error, error_message);\n    }\n    \n    return ret;\n}\n\n\n/**\n * Fixed size type used to represent size_type values on serialization. Need to be big enough\n * to represent a std::size_t on 32 and 64 bits platforms, and must be the same size on both platforms.\n */\nusing slz_size_type = std::uint64_t;\nstatic_assert(std::numeric_limits<slz_size_type>::max() >= std::numeric_limits<std::size_t>::max(),\n              \"slz_size_type must be >= std::size_t\");\n\ntemplate<class T, class Deserializer>\nstatic T deserialize_value(Deserializer& deserializer) {\n    // MSVC < 2017 is not conformant, circumvent the problem by removing the template keyword\n#if defined (_MSC_VER) && _MSC_VER < 1910\n    return deserializer.Deserializer::operator()<T>();\n#else\n    return deserializer.Deserializer::template operator()<T>();\n#endif\n}\n\n\n/**\n * Each bucket entry stores an index which is the index in m_values corresponding to the bucket's value \n * and a hash (which may be truncated to 32 bits depending on IndexType) corresponding to the hash of the value.\n * \n * The size of IndexType limits the size of the hash table to std::numeric_limits<IndexType>::max() - 1 elements (-1 due to \n * a reserved value used to mark a bucket as empty).\n */\ntemplate<class IndexType>\nclass bucket_entry {\n    static_assert(std::is_unsigned<IndexType>::value, \"IndexType must be an unsigned value.\");\n    static_assert(std::numeric_limits<IndexType>::max() <= std::numeric_limits<std::size_t>::max(), \n                  \"std::numeric_limits<IndexType>::max() must be <= std::numeric_limits<std::size_t>::max().\");\n    \npublic:\n    using index_type = IndexType;\n    using truncated_hash_type = typename std::conditional<std::numeric_limits<IndexType>::max() <= \n                    
                                      std::numeric_limits<std::uint_least32_t>::max(),\n                                                              std::uint_least32_t, \n                                                              std::size_t>::type;\n    \n    bucket_entry() noexcept: m_index(EMPTY_MARKER_INDEX), m_hash(0) {\n    }\n    \n    bool empty() const noexcept {\n        return m_index == EMPTY_MARKER_INDEX;\n    }\n    \n    void clear() noexcept {\n        m_index = EMPTY_MARKER_INDEX;\n    }\n    \n    index_type index() const noexcept {\n        tsl_oh_assert(!empty());\n        return m_index;\n    }\n    \n    index_type& index_ref() noexcept {\n        tsl_oh_assert(!empty());\n        return m_index;\n    }\n    \n    void set_index(index_type index) noexcept {\n        tsl_oh_assert(index <= max_size());\n        \n        m_index = index;\n    }\n    \n    truncated_hash_type truncated_hash() const noexcept {\n        tsl_oh_assert(!empty());\n        return m_hash;\n    }\n    \n    truncated_hash_type& truncated_hash_ref() noexcept {\n        tsl_oh_assert(!empty());\n        return m_hash;\n    }\n    \n    void set_hash(std::size_t hash) noexcept {\n        m_hash = truncate_hash(hash);\n    }\n    \n    template<class Serializer>\n    void serialize(Serializer& serializer) const {\n        const slz_size_type index = m_index;\n        serializer(index);\n        \n        const slz_size_type hash = m_hash;\n        serializer(hash);\n    }\n    \n    template<class Deserializer>\n    static bucket_entry deserialize(Deserializer& deserializer) {\n        const slz_size_type index = deserialize_value<slz_size_type>(deserializer);\n        const slz_size_type hash = deserialize_value<slz_size_type>(deserializer);\n        \n        bucket_entry bentry;\n        bentry.m_index = numeric_cast<index_type>(index, \"Deserialized index is too big.\");\n        bentry.m_hash = numeric_cast<truncated_hash_type>(hash, \"Deserialized hash is too 
big.\");\n        \n        return bentry;\n    }\n    \n    \n    \n    static truncated_hash_type truncate_hash(std::size_t hash) noexcept {\n        return truncated_hash_type(hash);\n    }\n    \n    static std::size_t max_size() noexcept {\n        return static_cast<std::size_t>(std::numeric_limits<index_type>::max()) - NB_RESERVED_INDEXES;\n    }\n    \nprivate:\n    static const index_type EMPTY_MARKER_INDEX = std::numeric_limits<index_type>::max();\n    static const std::size_t NB_RESERVED_INDEXES = 1;\n    \n    index_type m_index;\n    truncated_hash_type m_hash;\n};\n\n\n\n/**\n * Internal common class used by ordered_map and ordered_set.\n * \n * ValueType is what will be stored by ordered_hash (usually std::pair<Key, T> for map and Key for set).\n * \n * KeySelect should be a FunctionObject which takes a ValueType in parameter and return a reference to the key.\n * \n * ValueSelect should be a FunctionObject which takes a ValueType in parameter and return a reference to the value. \n * ValueSelect should be void if there is no value (in set for example).\n * \n * ValueTypeContainer is the container which will be used to store ValueType values. \n * Usually a std::deque<ValueType, Allocator> or std::vector<ValueType, Allocator>.\n * \n * \n * \n * The ordered_hash structure is a hash table which preserves the order of insertion of the elements.\n * To do so, it stores the values in the ValueTypeContainer (m_values) using emplace_back at each\n * insertion of a new element. Another structure (m_buckets of type std::vector<bucket_entry>) will \n * serve as buckets array for the hash table part. Each bucket stores an index which corresponds to \n * the index in m_values where the bucket's value is and the (truncated) hash of this value. 
An index\n * is used instead of a pointer to the value to reduce the size of each bucket entry.\n * \n * To resolve collisions in the buckets array, the structures use robin hood linear probing with \n * backward shift deletion.\n */\ntemplate<class ValueType,\n         class KeySelect,\n         class ValueSelect,\n         class Hash,\n         class KeyEqual,\n         class Allocator,\n         class ValueTypeContainer,\n         class IndexType>\nclass ordered_hash: private Hash, private KeyEqual {\nprivate:\n    template<typename U>\n    using has_mapped_type = typename std::integral_constant<bool, !std::is_same<U, void>::value>;\n    \n    static_assert(std::is_same<typename ValueTypeContainer::value_type, ValueType>::value, \n                  \"ValueTypeContainer::value_type != ValueType. \"\n                  \"Check that the ValueTypeContainer has 'Key' as type for a set or 'std::pair<Key, T>' as type for a map.\");\n    \n    static_assert(std::is_same<typename ValueTypeContainer::allocator_type, Allocator>::value, \n                  \"ValueTypeContainer::allocator_type != Allocator. \"\n                  \"Check that the allocator for ValueTypeContainer is the same as Allocator.\");\n    \n    static_assert(std::is_same<typename Allocator::value_type, ValueType>::value, \n                  \"Allocator::value_type != ValueType. 
\"\n                  \"Check that the allocator has 'Key' as type for a set or 'std::pair<Key, T>' as type for a map.\");\n    \n    \npublic:\n    template<bool IsConst>\n    class ordered_iterator;\n    \n    using key_type = typename KeySelect::key_type;\n    using value_type = ValueType;\n    using size_type = std::size_t;\n    using difference_type = std::ptrdiff_t;\n    using hasher = Hash;\n    using key_equal = KeyEqual;\n    using allocator_type = Allocator;\n    using reference = value_type&;\n    using const_reference = const value_type&;\n    using pointer = value_type*;\n    using const_pointer = const value_type*;\n    using iterator = ordered_iterator<false>;\n    using const_iterator = ordered_iterator<true>;\n    using reverse_iterator = std::reverse_iterator<iterator>;\n    using const_reverse_iterator = std::reverse_iterator<const_iterator>;\n    \n    using values_container_type = ValueTypeContainer;\n    \npublic:\n    template<bool IsConst>\n    class ordered_iterator {\n        friend class ordered_hash;\n        \n    private:\n        using iterator = typename std::conditional<IsConst, \n                                                    typename values_container_type::const_iterator, \n                                                    typename values_container_type::iterator>::type;\n    \n        \n        ordered_iterator(iterator it) noexcept: m_iterator(it) {\n        }\n        \n    public:\n        using iterator_category = std::random_access_iterator_tag;\n        using value_type = const typename ordered_hash::value_type;\n        using difference_type = typename iterator::difference_type;\n        using reference = value_type&;\n        using pointer = value_type*;\n        \n        \n        ordered_iterator() noexcept {\n        }\n        \n        // Copy constructor from iterator to const_iterator.\n        template<bool TIsConst = IsConst, typename std::enable_if<TIsConst>::type* = nullptr>\n        
ordered_iterator(const ordered_iterator<!TIsConst>& other) noexcept: m_iterator(other.m_iterator) {\n        }\n\n        ordered_iterator(const ordered_iterator& other) = default;\n        ordered_iterator(ordered_iterator&& other) = default;\n        ordered_iterator& operator=(const ordered_iterator& other) = default;\n        ordered_iterator& operator=(ordered_iterator&& other) = default;\n\n        const typename ordered_hash::key_type& key() const {\n            return KeySelect()(*m_iterator);\n        }\n\n        template<class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value && IsConst>::type* = nullptr>\n        const typename U::value_type& value() const {\n            return U()(*m_iterator);\n        }\n\n        template<class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value && !IsConst>::type* = nullptr>\n        typename U::value_type& value() {\n            return U()(*m_iterator);\n        }\n        \n        reference operator*() const { return *m_iterator; }\n        pointer operator->() const { return m_iterator.operator->(); }\n        \n        ordered_iterator& operator++() { ++m_iterator; return *this; }\n        ordered_iterator& operator--() { --m_iterator; return *this; }\n        \n        ordered_iterator operator++(int) { ordered_iterator tmp(*this); ++(*this); return tmp; }\n        ordered_iterator operator--(int) { ordered_iterator tmp(*this); --(*this); return tmp; }\n        \n        reference operator[](difference_type n) const { return m_iterator[n]; }\n        \n        ordered_iterator& operator+=(difference_type n) { m_iterator += n; return *this; }\n        ordered_iterator& operator-=(difference_type n) { m_iterator -= n; return *this; }\n        \n        ordered_iterator operator+(difference_type n) { ordered_iterator tmp(*this); tmp += n; return tmp; }\n        ordered_iterator operator-(difference_type n) { ordered_iterator tmp(*this); tmp -= n; return tmp; }\n        \n        
friend bool operator==(const ordered_iterator& lhs, const ordered_iterator& rhs) { \n            return lhs.m_iterator == rhs.m_iterator; \n        }\n        \n        friend bool operator!=(const ordered_iterator& lhs, const ordered_iterator& rhs) { \n            return lhs.m_iterator != rhs.m_iterator; \n        }\n        \n        friend bool operator<(const ordered_iterator& lhs, const ordered_iterator& rhs) { \n            return lhs.m_iterator < rhs.m_iterator; \n        }\n        \n        friend bool operator>(const ordered_iterator& lhs, const ordered_iterator& rhs) { \n            return lhs.m_iterator > rhs.m_iterator; \n        }\n        \n        friend bool operator<=(const ordered_iterator& lhs, const ordered_iterator& rhs) { \n            return lhs.m_iterator <= rhs.m_iterator; \n        }\n        \n        friend bool operator>=(const ordered_iterator& lhs, const ordered_iterator& rhs) { \n            return lhs.m_iterator >= rhs.m_iterator; \n        }\n\n        friend ordered_iterator operator+(difference_type n, const ordered_iterator& it) { \n            return n + it.m_iterator;\n        }\n\n        friend difference_type operator-(const ordered_iterator& lhs, const ordered_iterator& rhs) { \n            return lhs.m_iterator - rhs.m_iterator; \n        }\n\n    private:\n        iterator m_iterator;\n    };\n    \n    \nprivate:\n    using bucket_entry = tsl::detail_ordered_hash::bucket_entry<IndexType>;\n                                         \n    using buckets_container_allocator = typename \n                            std::allocator_traits<allocator_type>::template rebind_alloc<bucket_entry>; \n                            \n    using buckets_container_type = std::vector<bucket_entry, buckets_container_allocator>;\n    \n    \n    using truncated_hash_type = typename bucket_entry::truncated_hash_type;\n    using index_type = typename bucket_entry::index_type;\n    \npublic:\n    ordered_hash(size_type bucket_count, \n            
     const Hash& hash,\n                 const KeyEqual& equal,\n                 const Allocator& alloc,\n                 float max_load_factor): Hash(hash),\n                                         KeyEqual(equal), \n                                         m_buckets_data(alloc), \n                                         m_buckets(static_empty_bucket_ptr()), \n                                         m_mask(0),\n                                         m_values(alloc), \n                                         m_grow_on_next_insert(false)\n    {\n        if(bucket_count > max_bucket_count()) {\n            TSL_OH_THROW_OR_TERMINATE(std::length_error, \"The map exceeds its maxmimum size.\");\n        }\n        \n        if(bucket_count > 0) {\n            bucket_count = round_up_to_power_of_two(bucket_count);\n            \n            m_buckets_data.resize(bucket_count);\n            m_buckets = m_buckets_data.data(),\n            m_mask = bucket_count - 1; \n        }\n        \n        this->max_load_factor(max_load_factor);\n    }\n    \n    ordered_hash(const ordered_hash& other): Hash(other),\n                                             KeyEqual(other),\n                                             m_buckets_data(other.m_buckets_data),\n                                             m_buckets(m_buckets_data.empty()?static_empty_bucket_ptr():\n                                                                              m_buckets_data.data()),\n                                             m_mask(other.m_mask),\n                                             m_values(other.m_values),\n                                             m_grow_on_next_insert(other.m_grow_on_next_insert),\n                                             m_max_load_factor(other.m_max_load_factor),\n                                             m_load_threshold(other.m_load_threshold)\n    {\n    }\n    \n    ordered_hash(ordered_hash&& other) 
noexcept(std::is_nothrow_move_constructible<Hash>::value &&\n                                                std::is_nothrow_move_constructible<KeyEqual>::value &&\n                                                std::is_nothrow_move_constructible<buckets_container_type>::value &&\n                                                std::is_nothrow_move_constructible<values_container_type>::value)\n                                          : Hash(std::move(static_cast<Hash&>(other))),\n                                            KeyEqual(std::move(static_cast<KeyEqual&>(other))),\n                                            m_buckets_data(std::move(other.m_buckets_data)),\n                                            m_buckets(m_buckets_data.empty()?static_empty_bucket_ptr():\n                                                                             m_buckets_data.data()),\n                                            m_mask(other.m_mask),\n                                            m_values(std::move(other.m_values)),\n                                            m_grow_on_next_insert(other.m_grow_on_next_insert),\n                                            m_max_load_factor(other.m_max_load_factor),\n                                            m_load_threshold(other.m_load_threshold)\n    {\n        other.m_buckets_data.clear();\n        other.m_buckets = static_empty_bucket_ptr();\n        other.m_mask = 0;\n        other.m_values.clear();\n        other.m_grow_on_next_insert = false;\n        other.m_load_threshold = 0;\n    }\n    \n    ordered_hash& operator=(const ordered_hash& other) {\n        if(&other != this) {\n            Hash::operator=(other);\n            KeyEqual::operator=(other);\n            \n            m_buckets_data = other.m_buckets_data;\n            m_buckets = m_buckets_data.empty()?static_empty_bucket_ptr():\n                                               m_buckets_data.data();\n                                                        \n  
          m_mask = other.m_mask;\n            m_values = other.m_values;\n            m_grow_on_next_insert = other.m_grow_on_next_insert;\n            m_max_load_factor = other.m_max_load_factor;\n            m_load_threshold = other.m_load_threshold;\n        }\n        \n        return *this;\n    }\n    \n    ordered_hash& operator=(ordered_hash&& other) {\n        other.swap(*this);\n        other.clear();\n        \n        return *this;\n    }\n    \n    allocator_type get_allocator() const {\n        return m_values.get_allocator();\n    }\n    \n    \n    /*\n     * Iterators\n     */\n    iterator begin() noexcept {\n        return iterator(m_values.begin());\n    }\n    \n    const_iterator begin() const noexcept {\n        return cbegin();\n    }\n    \n    const_iterator cbegin() const noexcept {\n        return const_iterator(m_values.cbegin());\n    }\n    \n    iterator end() noexcept {\n        return iterator(m_values.end());\n    }\n    \n    const_iterator end() const noexcept {\n        return cend();\n    }\n    \n    const_iterator cend() const noexcept {\n        return const_iterator(m_values.cend());\n    }  \n    \n    \n    reverse_iterator rbegin() noexcept {\n        return reverse_iterator(m_values.end());\n    }\n    \n    const_reverse_iterator rbegin() const noexcept {\n        return rcbegin();\n    }\n    \n    const_reverse_iterator rcbegin() const noexcept {\n        return const_reverse_iterator(m_values.cend());\n    }\n    \n    reverse_iterator rend() noexcept {\n        return reverse_iterator(m_values.begin());\n    }\n    \n    const_reverse_iterator rend() const noexcept {\n        return rcend();\n    }\n    \n    const_reverse_iterator rcend() const noexcept {\n        return const_reverse_iterator(m_values.cbegin());\n    }  \n    \n    \n    /*\n     * Capacity\n     */\n    bool empty() const noexcept {\n        return m_values.empty();\n    }\n    \n    size_type size() const noexcept {\n        return 
m_values.size();\n    }\n    \n    size_type max_size() const noexcept {\n        return std::min(bucket_entry::max_size(), m_values.max_size());\n    }\n    \n\n    /*\n     * Modifiers\n     */\n    void clear() noexcept {\n        for(auto& bucket: m_buckets_data) {\n            bucket.clear();\n        }\n        \n        m_values.clear();\n        m_grow_on_next_insert = false;\n    }\n    \n    template<typename P>\n    std::pair<iterator, bool> insert(P&& value) {\n        return insert_impl(KeySelect()(value), std::forward<P>(value));\n    }\n    \n    template<typename P>\n    iterator insert_hint(const_iterator hint, P&& value) { \n        if(hint != cend() && compare_keys(KeySelect()(*hint), KeySelect()(value))) { \n            return mutable_iterator(hint); \n        }\n        \n        return insert(std::forward<P>(value)).first; \n    }\n    \n    template<class InputIt>\n    void insert(InputIt first, InputIt last) {\n        if(std::is_base_of<std::forward_iterator_tag, \n                           typename std::iterator_traits<InputIt>::iterator_category>::value) \n        {\n            const auto nb_elements_insert = std::distance(first, last);\n            const size_type nb_free_buckets = m_load_threshold - size();\n            tsl_oh_assert(m_load_threshold >= size());\n            \n            if(nb_elements_insert > 0 && nb_free_buckets < size_type(nb_elements_insert)) {\n                reserve(size() + size_type(nb_elements_insert));\n            }\n        }\n        \n        for(; first != last; ++first) {\n            insert(*first);\n        }\n    }\n    \n    \n    \n    template<class K, class M>\n    std::pair<iterator, bool> insert_or_assign(K&& key, M&& value) {\n        auto it = try_emplace(std::forward<K>(key), std::forward<M>(value));\n        if(!it.second) {\n            it.first.value() = std::forward<M>(value);\n        }\n        \n        return it;\n    }\n    \n    template<class K, class M>\n    iterator 
insert_or_assign(const_iterator hint, K&& key, M&& obj) {\n        if(hint != cend() && compare_keys(KeySelect()(*hint), key)) { \n            auto it = mutable_iterator(hint); \n            it.value() = std::forward<M>(obj);\n            \n            return it;\n        }\n        \n        return insert_or_assign(std::forward<K>(key), std::forward<M>(obj)).first;\n    }\n    \n    \n    \n    template<class... Args>\n    std::pair<iterator, bool> emplace(Args&&... args) {\n        return insert(value_type(std::forward<Args>(args)...));\n    }\n    \n    template<class... Args>\n    iterator emplace_hint(const_iterator hint, Args&&... args) { \n        return insert_hint(hint, value_type(std::forward<Args>(args)...));\n    }\n    \n    \n    \n    template<class K, class... Args>\n    std::pair<iterator, bool> try_emplace(K&& key, Args&&... value_args) {\n        return insert_impl(key, std::piecewise_construct, \n                                std::forward_as_tuple(std::forward<K>(key)), \n                                std::forward_as_tuple(std::forward<Args>(value_args)...));     \n    }\n    \n    template<class K, class... Args>\n    iterator try_emplace_hint(const_iterator hint, K&& key, Args&&... 
args) {\n        if(hint != cend() && compare_keys(KeySelect()(*hint), key)) { \n            return mutable_iterator(hint); \n        }\n        \n        return try_emplace(std::forward<K>(key), std::forward<Args>(args)...).first;\n    }\n    \n    \n    \n    /**\n     * Here to avoid `template<class K> size_type erase(const K& key)` being used when\n     * we use an `iterator` instead of a `const_iterator`.\n     */\n    iterator erase(iterator pos) {\n        return erase(const_iterator(pos));\n    }\n    \n    iterator erase(const_iterator pos) {\n        tsl_oh_assert(pos != cend());\n        \n        const std::size_t index_erase = iterator_to_index(pos);\n        \n        auto it_bucket = find_key(pos.key(), hash_key(pos.key()));\n        tsl_oh_assert(it_bucket != m_buckets_data.end());\n        \n        erase_value_from_bucket(it_bucket);\n        \n        /*\n         * One element was removed from m_values, due to the left shift the next element \n         * is now at the position of the previous element (or end if none).\n         */\n        return begin() + index_erase;\n    }\n\n    iterator erase(const_iterator first, const_iterator last) {\n        if(first == last) {\n            return mutable_iterator(first);\n        }\n        \n        tsl_oh_assert(std::distance(first, last) > 0);\n        const std::size_t start_index = iterator_to_index(first);\n        const std::size_t nb_values = std::size_t(std::distance(first, last));\n        const std::size_t end_index = start_index + nb_values;\n        \n        // Delete all values\n#ifdef TSL_OH_NO_CONTAINER_ERASE_CONST_ITERATOR     \n        auto next_it = m_values.erase(mutable_iterator(first).m_iterator, mutable_iterator(last).m_iterator);   \n#else\n        auto next_it = m_values.erase(first.m_iterator, last.m_iterator);\n#endif\n        \n        /*\n         * Mark the buckets corresponding to the values as empty and do a backward shift.\n         * \n         * Also, the erase 
operation on m_values has shifted all the values on the right of last.m_iterator.\n         * Adapt the indexes for these values.\n         */\n        std::size_t ibucket = 0;\n        while(ibucket < m_buckets_data.size()) {\n            if(m_buckets[ibucket].empty()) {\n                ibucket++;\n            }\n            else if(m_buckets[ibucket].index() >= start_index && m_buckets[ibucket].index() < end_index) {\n                m_buckets[ibucket].clear();\n                backward_shift(ibucket);\n                // Don't increment ibucket, backward_shift may have replaced current bucket.\n            }\n            else if(m_buckets[ibucket].index() >= end_index) {\n                m_buckets[ibucket].set_index(index_type(m_buckets[ibucket].index() - nb_values));\n                ibucket++;\n            }\n            else {\n                ibucket++;\n            }\n        }\n        \n        return iterator(next_it);\n    }\n    \n\n    template<class K>\n    size_type erase(const K& key) {\n        return erase(key, hash_key(key));\n    }\n    \n    template<class K>\n    size_type erase(const K& key, std::size_t hash) {\n        return erase_impl(key, hash);\n    }\n    \n    void swap(ordered_hash& other) {\n        using std::swap;\n        \n        swap(static_cast<Hash&>(*this), static_cast<Hash&>(other));\n        swap(static_cast<KeyEqual&>(*this), static_cast<KeyEqual&>(other));\n        swap(m_buckets_data, other.m_buckets_data);\n        swap(m_buckets, other.m_buckets);\n        swap(m_mask, other.m_mask);\n        swap(m_values, other.m_values);\n        swap(m_grow_on_next_insert, other.m_grow_on_next_insert);\n        swap(m_max_load_factor, other.m_max_load_factor);\n        swap(m_load_threshold, other.m_load_threshold);\n    }\n    \n        \n    \n\n    /*\n     * Lookup\n     */    \n    template<class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>\n    typename U::value_type& 
at(const K& key) {\n        return at(key, hash_key(key));\n    }\n    \n    template<class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>\n    typename U::value_type& at(const K& key, std::size_t hash) {\n        return const_cast<typename U::value_type&>(static_cast<const ordered_hash*>(this)->at(key, hash));\n    }\n    \n    template<class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>\n    const typename U::value_type& at(const K& key) const {\n        return at(key, hash_key(key));\n    }\n    \n    template<class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>\n    const typename U::value_type& at(const K& key, std::size_t hash) const {\n        auto it = find(key, hash);\n        if(it != end()) {\n            return it.value();\n        }\n        else {\n            TSL_OH_THROW_OR_TERMINATE(std::out_of_range, \"Couldn't find the key.\");\n        }\n    }\n    \n    \n    template<class K, class U = ValueSelect, typename std::enable_if<has_mapped_type<U>::value>::type* = nullptr>\n    typename U::value_type& operator[](K&& key) {\n        return try_emplace(std::forward<K>(key)).first.value();\n    }\n    \n    \n    template<class K>\n    size_type count(const K& key) const {\n        return count(key, hash_key(key));\n    }\n    \n    template<class K>\n    size_type count(const K& key, std::size_t hash) const {\n        if(find(key, hash) == cend()) {\n            return 0;\n        }\n        else {\n            return 1;\n        }\n    }\n    \n    template<class K>\n    iterator find(const K& key) {\n        return find(key, hash_key(key));\n    }\n    \n    template<class K>\n    iterator find(const K& key, std::size_t hash) {\n        auto it_bucket = find_key(key, hash);\n        return (it_bucket != m_buckets_data.end())?iterator(m_values.begin() + it_bucket->index()):end();\n    }\n    \n    
template<class K>\n    const_iterator find(const K& key) const {\n        return find(key, hash_key(key));\n    }\n    \n    template<class K>\n    const_iterator find(const K& key, std::size_t hash) const {\n        auto it_bucket = find_key(key, hash);\n        return (it_bucket != m_buckets_data.cend())?const_iterator(m_values.begin() + it_bucket->index()):end();\n    }\n    \n    \n    template<class K>\n    std::pair<iterator, iterator> equal_range(const K& key) {\n        return equal_range(key, hash_key(key));\n    }\n    \n    template<class K>\n    std::pair<iterator, iterator> equal_range(const K& key, std::size_t hash) {\n        iterator it = find(key, hash);\n        return std::make_pair(it, (it == end())?it:std::next(it));\n    }\n    \n    template<class K>\n    std::pair<const_iterator, const_iterator> equal_range(const K& key) const {\n        return equal_range(key, hash_key(key));\n    }\n    \n    template<class K>\n    std::pair<const_iterator, const_iterator> equal_range(const K& key, std::size_t hash) const {\n        const_iterator it = find(key, hash);\n        return std::make_pair(it, (it == cend())?it:std::next(it));\n    }    \n    \n    \n    /*\n     * Bucket interface \n     */\n    size_type bucket_count() const {\n        return m_buckets_data.size(); \n    }\n    \n    size_type max_bucket_count() const {\n        return m_buckets_data.max_size();\n    }    \n    \n    /*\n     *  Hash policy \n     */\n    float load_factor() const {\n        if(bucket_count() == 0) {\n            return 0;\n        }\n        \n        return float(size())/float(bucket_count());\n    }\n    \n    float max_load_factor() const {\n        return m_max_load_factor;\n    }\n    \n    void max_load_factor(float ml) {\n        m_max_load_factor = std::max(0.1f, std::min(ml, 0.95f));\n        m_load_threshold = size_type(float(bucket_count())*m_max_load_factor);\n    }\n    \n    void rehash(size_type count) {\n        count = std::max(count, 
size_type(std::ceil(float(size())/max_load_factor())));\n        rehash_impl(count);\n    }\n    \n    void reserve(size_type count) {\n        reserve_space_for_values(count);\n        \n        count = size_type(std::ceil(float(count)/max_load_factor()));\n        rehash(count);\n    }\n    \n    \n    /*\n     * Observers\n     */\n    hasher hash_function() const {\n        return static_cast<const Hash&>(*this);\n    }\n    \n    key_equal key_eq() const {\n        return static_cast<const KeyEqual&>(*this);\n    }\n\n    \n    /*\n     * Other\n     */\n    iterator mutable_iterator(const_iterator pos) {\n        return iterator(m_values.begin() + iterator_to_index(pos));\n    }\n    \n    iterator nth(size_type index) {\n        tsl_oh_assert(index <= size());\n        return iterator(m_values.begin() + index);\n    }\n    \n    const_iterator nth(size_type index) const {\n        tsl_oh_assert(index <= size());\n        return const_iterator(m_values.cbegin() + index);\n    }\n    \n    const_reference front() const {\n        tsl_oh_assert(!empty());\n        return m_values.front();\n    }\n    \n    const_reference back() const {\n        tsl_oh_assert(!empty());\n        return m_values.back();\n    }\n    \n    const values_container_type& values_container() const noexcept {\n        return m_values;\n    }\n    \n    template<class U = values_container_type, typename std::enable_if<is_vector<U>::value>::type* = nullptr>    \n    const typename values_container_type::value_type* data() const noexcept {\n        return m_values.data();\n    }\n    \n    template<class U = values_container_type, typename std::enable_if<is_vector<U>::value>::type* = nullptr>    \n    size_type capacity() const noexcept {\n        return m_values.capacity();\n    }\n    \n    void shrink_to_fit() {\n        m_values.shrink_to_fit();\n    }\n    \n    \n    template<typename P>\n    std::pair<iterator, bool> insert_at_position(const_iterator pos, P&& value) {\n        
return insert_at_position_impl(pos.m_iterator, KeySelect()(value), std::forward<P>(value));\n    }\n    \n    template<class... Args>\n    std::pair<iterator, bool> emplace_at_position(const_iterator pos, Args&&... args) {\n        return insert_at_position(pos, value_type(std::forward<Args>(args)...));\n    }\n    \n    template<class K, class... Args>\n    std::pair<iterator, bool> try_emplace_at_position(const_iterator pos, K&& key, Args&&... value_args) {\n        return insert_at_position_impl(pos.m_iterator, key, \n                                       std::piecewise_construct, \n                                       std::forward_as_tuple(std::forward<K>(key)), \n                                       std::forward_as_tuple(std::forward<Args>(value_args)...));\n    }\n    \n\n    void pop_back() {\n        tsl_oh_assert(!empty());\n        erase(std::prev(end()));\n    }\n    \n    \n    /**\n     * Here to avoid `template<class K> size_type unordered_erase(const K& key)` being used when\n     * we use a iterator instead of a const_iterator.\n     */    \n    iterator unordered_erase(iterator pos) {\n        return unordered_erase(const_iterator(pos));\n    }\n    \n    iterator unordered_erase(const_iterator pos) {\n        const std::size_t index_erase = iterator_to_index(pos);\n        unordered_erase(pos.key());\n        \n        /*\n         * One element was deleted, index_erase now points to the next element as the elements after\n         * the deleted value were shifted to the left in m_values (will be end() if we deleted the last element).\n         */\n        return begin() + index_erase;\n    }\n    \n    template<class K>\n    size_type unordered_erase(const K& key) {\n        return unordered_erase(key, hash_key(key));\n    }\n    \n    template<class K>\n    size_type unordered_erase(const K& key, std::size_t hash) {\n        auto it_bucket_key = find_key(key, hash);\n        if(it_bucket_key == m_buckets_data.end()) {\n            return 
0;\n        }\n        \n        /**\n         * If we are not erasing the last element in m_values, we swap \n         * the element we are erasing with the last element. We then would \n         * just have to do a pop_back() in m_values.\n         */\n        if(!compare_keys(key, KeySelect()(back()))) {\n            auto it_bucket_last_elem = find_key(KeySelect()(back()), hash_key(KeySelect()(back())));\n            tsl_oh_assert(it_bucket_last_elem != m_buckets_data.end());\n            tsl_oh_assert(it_bucket_last_elem->index() == m_values.size() - 1);\n            \n            using std::swap;\n            swap(m_values[it_bucket_key->index()], m_values[it_bucket_last_elem->index()]);\n            swap(it_bucket_key->index_ref(), it_bucket_last_elem->index_ref());\n        }\n        \n        erase_value_from_bucket(it_bucket_key);\n        \n        return 1;\n    }\n    \n    template<class Serializer>\n    void serialize(Serializer& serializer) const {\n        serialize_impl(serializer);\n    }\n    \n    template<class Deserializer>\n    void deserialize(Deserializer& deserializer, bool hash_compatible) {\n        deserialize_impl(deserializer, hash_compatible);\n    }\n    \n    friend bool operator==(const ordered_hash& lhs, const ordered_hash& rhs) {\n        return lhs.m_values == rhs.m_values;\n    }\n    \n    friend bool operator!=(const ordered_hash& lhs, const ordered_hash& rhs) {\n        return lhs.m_values != rhs.m_values;\n    }\n    \n    friend bool operator<(const ordered_hash& lhs, const ordered_hash& rhs) {\n        return lhs.m_values < rhs.m_values;\n    }\n    \n    friend bool operator<=(const ordered_hash& lhs, const ordered_hash& rhs) {\n        return lhs.m_values <= rhs.m_values;\n    }\n    \n    friend bool operator>(const ordered_hash& lhs, const ordered_hash& rhs) {\n        return lhs.m_values > rhs.m_values;\n    }\n    \n    friend bool operator>=(const ordered_hash& lhs, const ordered_hash& rhs) {\n        return 
lhs.m_values >= rhs.m_values;\n    }\n    \n    \nprivate:\n    template<class K>\n    std::size_t hash_key(const K& key) const {\n        return Hash::operator()(key);\n    }\n    \n    template<class K1, class K2>\n    bool compare_keys(const K1& key1, const K2& key2) const {\n        return KeyEqual::operator()(key1, key2);\n    }\n    \n    template<class K>\n    typename buckets_container_type::iterator find_key(const K& key, std::size_t hash) {\n        auto it = static_cast<const ordered_hash*>(this)->find_key(key, hash);\n        return m_buckets_data.begin() + std::distance(m_buckets_data.cbegin(), it);\n    }\n    \n    /**\n     * Return bucket which has the key 'key' or m_buckets_data.end() if none.\n     * \n     * From the bucket_for_hash, search for the value until we either find an empty bucket\n     * or a bucket which has a value with a distance from its ideal bucket longer\n     * than the probe length for the value we are looking for.\n     */\n    template<class K>\n    typename buckets_container_type::const_iterator find_key(const K& key, std::size_t hash) const {\n        for(std::size_t ibucket = bucket_for_hash(hash), dist_from_ideal_bucket = 0; ; \n            ibucket = next_bucket(ibucket), dist_from_ideal_bucket++) \n        {\n            if(m_buckets[ibucket].empty()) {\n                return m_buckets_data.end();\n            }\n            else if(m_buckets[ibucket].truncated_hash() == bucket_entry::truncate_hash(hash) && \n                    compare_keys(key, KeySelect()(m_values[m_buckets[ibucket].index()]))) \n            {\n                return m_buckets_data.begin() + ibucket;\n            }\n            else if(dist_from_ideal_bucket > distance_from_ideal_bucket(ibucket)) {\n                return m_buckets_data.end();\n            }\n        }\n    }\n    \n    void rehash_impl(size_type bucket_count) {\n        tsl_oh_assert(bucket_count >= size_type(std::ceil(float(size())/max_load_factor())));\n        \n        
if(bucket_count > max_bucket_count()) {\n            TSL_OH_THROW_OR_TERMINATE(std::length_error, \"The map exceeds its maxmimum size.\");\n        }\n        \n        if(bucket_count > 0) {\n            bucket_count = round_up_to_power_of_two(bucket_count);\n        }\n        \n        if(bucket_count == this->bucket_count()) {\n            return;\n        }\n        \n        \n        buckets_container_type old_buckets(bucket_count);\n        m_buckets_data.swap(old_buckets);\n        m_buckets = m_buckets_data.empty()?static_empty_bucket_ptr():\n                                           m_buckets_data.data();\n        // Everything should be noexcept from here.\n        \n        m_mask = (bucket_count > 0)?(bucket_count - 1):0;\n        this->max_load_factor(m_max_load_factor);\n        m_grow_on_next_insert = false;\n        \n        \n        \n        for(const bucket_entry& old_bucket: old_buckets) {\n            if(old_bucket.empty()) {\n                continue;\n            }\n            \n            truncated_hash_type insert_hash = old_bucket.truncated_hash();\n            index_type insert_index = old_bucket.index();\n            \n            for(std::size_t ibucket = bucket_for_hash(insert_hash), dist_from_ideal_bucket = 0; ; \n                ibucket = next_bucket(ibucket), dist_from_ideal_bucket++) \n            {\n                if(m_buckets[ibucket].empty()) {\n                    m_buckets[ibucket].set_index(insert_index);\n                    m_buckets[ibucket].set_hash(insert_hash);\n                    break;\n                }\n                \n                const std::size_t distance = distance_from_ideal_bucket(ibucket);\n                if(dist_from_ideal_bucket > distance) {\n                    std::swap(insert_index, m_buckets[ibucket].index_ref());\n                    std::swap(insert_hash, m_buckets[ibucket].truncated_hash_ref());\n                    dist_from_ideal_bucket = distance;\n                }\n            
}\n        }\n    }\n    \n    template<class T = values_container_type, typename std::enable_if<is_vector<T>::value>::type* = nullptr>\n    void reserve_space_for_values(size_type count) {\n        m_values.reserve(count);\n    }\n    \n    template<class T = values_container_type, typename std::enable_if<!is_vector<T>::value>::type* = nullptr>\n    void reserve_space_for_values(size_type /*count*/) {\n    }\n    \n    /**\n     * Swap the empty bucket with the values on its right until we cross another empty bucket\n     * or if the other bucket has a distance_from_ideal_bucket == 0.\n     */\n    void backward_shift(std::size_t empty_ibucket) noexcept {\n        tsl_oh_assert(m_buckets[empty_ibucket].empty());\n        \n        std::size_t previous_ibucket = empty_ibucket;\n        for(std::size_t current_ibucket = next_bucket(previous_ibucket); \n            !m_buckets[current_ibucket].empty() && distance_from_ideal_bucket(current_ibucket) > 0;\n            previous_ibucket = current_ibucket, current_ibucket = next_bucket(current_ibucket)) \n        {\n            std::swap(m_buckets[current_ibucket], m_buckets[previous_ibucket]);\n        }\n    }\n    \n    void erase_value_from_bucket(typename buckets_container_type::iterator it_bucket) {\n        tsl_oh_assert(it_bucket != m_buckets_data.end() && !it_bucket->empty());\n        \n        m_values.erase(m_values.begin() + it_bucket->index());\n        \n        /*\n         * m_values.erase shifted all the values on the right of the erased value, \n         * shift the indexes by -1 in the buckets array for these values.\n         */\n        if(it_bucket->index() != m_values.size()) {\n            shift_indexes_in_buckets(it_bucket->index(), -1);\n        }        \n        \n        // Mark the bucket as empty and do a backward shift of the values on the right\n        it_bucket->clear();\n        backward_shift(std::size_t(std::distance(m_buckets_data.begin(), it_bucket)));\n    }\n    \n    /**\n     * 
Go through each value from [from_ivalue, m_values.size()) in m_values and for each\n     * bucket corresponding to the value, shift the index by delta.\n     * \n     * delta must be equal to 1 or -1.\n     */\n    void shift_indexes_in_buckets(index_type from_ivalue, int delta) noexcept  {\n        tsl_oh_assert(delta == 1 || delta == -1);\n        \n        for(std::size_t ivalue = from_ivalue; ivalue < m_values.size(); ivalue++) {\n            // All the values in m_values have been shifted by delta. Find the bucket corresponding \n            // to the value m_values[ivalue]\n            const index_type old_index = static_cast<index_type>(ivalue - delta);\n            \n            std::size_t ibucket = bucket_for_hash(hash_key(KeySelect()(m_values[ivalue])));\n            while(m_buckets[ibucket].index() != old_index) {\n                ibucket = next_bucket(ibucket);\n            }\n            \n            m_buckets[ibucket].set_index(index_type(ivalue));\n        }\n    }\n    \n    template<class K>\n    size_type erase_impl(const K& key, std::size_t hash) {\n        auto it_bucket = find_key(key, hash);\n        if(it_bucket != m_buckets_data.end()) {\n            erase_value_from_bucket(it_bucket);\n            \n            return 1;\n        }\n        else {\n            return 0;\n        }\n    }\n    \n    /**\n     * Insert the element at the end.\n     */\n    template<class K, class... Args>\n    std::pair<iterator, bool> insert_impl(const K& key, Args&&... 
value_type_args) {\n        const std::size_t hash = hash_key(key);\n        \n        std::size_t ibucket = bucket_for_hash(hash); \n        std::size_t dist_from_ideal_bucket = 0;\n        \n        while(!m_buckets[ibucket].empty() && dist_from_ideal_bucket <= distance_from_ideal_bucket(ibucket)) {\n            if(m_buckets[ibucket].truncated_hash() == bucket_entry::truncate_hash(hash) && \n               compare_keys(key, KeySelect()(m_values[m_buckets[ibucket].index()]))) \n            {\n                return std::make_pair(begin() + m_buckets[ibucket].index(), false);\n            }\n            \n            ibucket = next_bucket(ibucket);\n            dist_from_ideal_bucket++;\n        }\n        \n        if(size() >= max_size()) {\n            TSL_OH_THROW_OR_TERMINATE(std::length_error, \"We reached the maximum size for the hash table.\");\n        }\n        \n        \n        if(grow_on_high_load()) {\n            ibucket = bucket_for_hash(hash);\n            dist_from_ideal_bucket = 0;\n        }\n        \n                \n        m_values.emplace_back(std::forward<Args>(value_type_args)...);\n        insert_index(ibucket, dist_from_ideal_bucket, \n                     index_type(m_values.size() - 1), bucket_entry::truncate_hash(hash));\n        \n        \n        return std::make_pair(std::prev(end()), true);\n    }\n    \n    /**\n     * Insert the element before insert_position.\n     */\n    template<class K, class... Args>\n    std::pair<iterator, bool> insert_at_position_impl(typename values_container_type::const_iterator insert_position,\n                                                      const K& key, Args&&... 
value_type_args) \n    {\n        const std::size_t hash = hash_key(key);\n        \n        std::size_t ibucket = bucket_for_hash(hash); \n        std::size_t dist_from_ideal_bucket = 0;\n        \n        while(!m_buckets[ibucket].empty() && dist_from_ideal_bucket <= distance_from_ideal_bucket(ibucket)) {\n            if(m_buckets[ibucket].truncated_hash() == bucket_entry::truncate_hash(hash) && \n               compare_keys(key, KeySelect()(m_values[m_buckets[ibucket].index()]))) \n            {\n                return std::make_pair(begin() + m_buckets[ibucket].index(), false);\n            }\n            \n            ibucket = next_bucket(ibucket);\n            dist_from_ideal_bucket++;\n        }\n        \n        if(size() >= max_size()) {\n            TSL_OH_THROW_OR_TERMINATE(std::length_error, \"We reached the maximum size for the hash table.\");\n        }\n        \n        \n        if(grow_on_high_load()) {\n            ibucket = bucket_for_hash(hash);\n            dist_from_ideal_bucket = 0;\n        }\n        \n        \n        const index_type index_insert_position = index_type(std::distance(m_values.cbegin(), insert_position));\n        \n#ifdef TSL_OH_NO_CONTAINER_EMPLACE_CONST_ITERATOR\n        m_values.emplace(m_values.begin() + std::distance(m_values.cbegin(), insert_position), std::forward<Args>(value_type_args)...);\n#else        \n        m_values.emplace(insert_position, std::forward<Args>(value_type_args)...);\n#endif        \n\n        insert_index(ibucket, dist_from_ideal_bucket, \n                     index_insert_position, bucket_entry::truncate_hash(hash));\n        \n        /*\n         * The insertion didn't happend at the end of the m_values container, \n         * we need to shift the indexes in m_buckets_data.\n         */\n        if(index_insert_position != m_values.size() - 1) {\n            shift_indexes_in_buckets(index_insert_position + 1, 1);\n        }\n        \n        return 
std::make_pair(iterator(m_values.begin() + index_insert_position), true);\n    }\n    \n    void insert_index(std::size_t ibucket, std::size_t dist_from_ideal_bucket, \n                      index_type index_insert, truncated_hash_type hash_insert) noexcept\n    {\n        while(!m_buckets[ibucket].empty()) {\n            const std::size_t distance = distance_from_ideal_bucket(ibucket);\n            if(dist_from_ideal_bucket > distance) {\n                std::swap(index_insert, m_buckets[ibucket].index_ref());\n                std::swap(hash_insert, m_buckets[ibucket].truncated_hash_ref());\n                \n                dist_from_ideal_bucket = distance;\n            }\n\n            \n            ibucket = next_bucket(ibucket);\n            dist_from_ideal_bucket++;\n            \n            \n            if(dist_from_ideal_bucket > REHASH_ON_HIGH_NB_PROBES__NPROBES && !m_grow_on_next_insert &&\n               load_factor() >= REHASH_ON_HIGH_NB_PROBES__MIN_LOAD_FACTOR)\n            {\n                // We don't want to grow the map now as we need this method to be noexcept.\n                // Do it on next insert.\n                m_grow_on_next_insert = true;\n            }\n        }\n        \n        \n        m_buckets[ibucket].set_index(index_insert);\n        m_buckets[ibucket].set_hash(hash_insert); \n    }\n    \n    std::size_t distance_from_ideal_bucket(std::size_t ibucket) const noexcept {\n        const std::size_t ideal_bucket = bucket_for_hash(m_buckets[ibucket].truncated_hash());\n        \n        if(ibucket >= ideal_bucket) {\n            return ibucket - ideal_bucket;\n        }\n        // If the bucket is smaller than the ideal bucket for the value, there was a wrapping at the end of the \n        // bucket array due to the modulo.\n        else {\n            return (bucket_count() + ibucket) - ideal_bucket;\n        }\n    }\n    \n    std::size_t next_bucket(std::size_t index) const noexcept {\n        tsl_oh_assert(index < 
m_buckets_data.size());\n        \n        index++;\n        return (index < m_buckets_data.size())?index:0;\n    }\n    \n    std::size_t bucket_for_hash(std::size_t hash) const noexcept {\n        return hash & m_mask;\n    }    \n    \n    std::size_t iterator_to_index(const_iterator it) const noexcept {\n        const auto dist = std::distance(cbegin(), it);\n        tsl_oh_assert(dist >= 0);\n        \n        return std::size_t(dist);\n    }\n    \n    /**\n     * Return true if the map has been rehashed.\n     */\n    bool grow_on_high_load() {\n        if(m_grow_on_next_insert || size() >= m_load_threshold) {\n            rehash_impl(std::max(size_type(1), bucket_count() * 2));\n            m_grow_on_next_insert = false;\n            \n            return true;\n        }\n        else {\n            return false;\n        }\n    }\n    \n    template<class Serializer>\n    void serialize_impl(Serializer& serializer) const {\n        const slz_size_type version = SERIALIZATION_PROTOCOL_VERSION;\n        serializer(version);\n        \n        const slz_size_type nb_elements = m_values.size();\n        serializer(nb_elements);\n        \n        const slz_size_type bucket_count = m_buckets_data.size();\n        serializer(bucket_count);\n        \n        const float max_load_factor = m_max_load_factor;\n        serializer(max_load_factor);\n        \n        \n        for(const value_type& value: m_values) {\n            serializer(value);\n        }\n        \n        for(const bucket_entry& bucket: m_buckets_data) {\n            bucket.serialize(serializer);\n        }\n    }\n    \n    template<class Deserializer>\n    void deserialize_impl(Deserializer& deserializer, bool hash_compatible) {\n        tsl_oh_assert(m_buckets_data.empty()); // Current hash table must be empty\n        \n        const slz_size_type version = deserialize_value<slz_size_type>(deserializer);\n        // For now we only have one version of the serialization protocol. 
\n        // If it doesn't match there is a problem with the file.\n        if(version != SERIALIZATION_PROTOCOL_VERSION) {\n            TSL_OH_THROW_OR_TERMINATE(std::runtime_error, \"Can't deserialize the ordered_map/set. \"\n                                                          \"The protocol version header is invalid.\");\n        }\n        \n        const slz_size_type nb_elements = deserialize_value<slz_size_type>(deserializer);\n        const slz_size_type bucket_count_ds = deserialize_value<slz_size_type>(deserializer);\n        const float max_load_factor = deserialize_value<float>(deserializer);\n        \n        \n        this->max_load_factor(max_load_factor);\n        \n        if(bucket_count_ds == 0) {\n            tsl_oh_assert(nb_elements == 0);\n            return;\n        }\n        \n        \n        if(!hash_compatible) {\n            reserve(numeric_cast<size_type>(nb_elements, \"Deserialized nb_elements is too big.\"));\n            for(slz_size_type el = 0; el < nb_elements; el++) {\n                insert(deserialize_value<value_type>(deserializer));\n            }\n        }\n        else {\n            m_buckets_data.reserve(numeric_cast<size_type>(bucket_count_ds, \"Deserialized bucket_count is too big.\"));\n            m_buckets = m_buckets_data.data(),\n            m_mask = m_buckets_data.capacity() - 1; \n            \n            reserve_space_for_values(numeric_cast<size_type>(nb_elements, \"Deserialized nb_elements is too big.\"));\n            for(slz_size_type el = 0; el < nb_elements; el++) {\n                m_values.push_back(deserialize_value<value_type>(deserializer));\n            }\n            \n            for(slz_size_type b = 0; b < bucket_count_ds; b++) {\n                m_buckets_data.push_back(bucket_entry::deserialize(deserializer));\n            }\n            \n            if(load_factor() > this->max_load_factor()) {\n                TSL_OH_THROW_OR_TERMINATE(std::runtime_error, \"Invalid 
max_load_factor. Check that the serializer \"\n                                                              \"and deserializer supports floats correctly as they \"\n                                                              \"can be converted implicitely to ints.\");\n            }\n        }\n    }\n    \n    static std::size_t round_up_to_power_of_two(std::size_t value) {\n        if(is_power_of_two(value)) {\n            return value;\n        }\n        \n        if(value == 0) {\n            return 1;\n        }\n        \n        --value;\n        for(std::size_t i = 1; i < sizeof(std::size_t) * CHAR_BIT; i *= 2) {\n            value |= value >> i;\n        }\n        \n        return value + 1;\n    }\n    \n    static constexpr bool is_power_of_two(std::size_t value) {\n        return value != 0 && (value & (value - 1)) == 0;\n    }\n\n    \npublic:\n    static const size_type DEFAULT_INIT_BUCKETS_SIZE = 0;\n    static constexpr float DEFAULT_MAX_LOAD_FACTOR = 0.75f;\n\nprivate:    \n    static const size_type REHASH_ON_HIGH_NB_PROBES__NPROBES = 128;\n    static constexpr float REHASH_ON_HIGH_NB_PROBES__MIN_LOAD_FACTOR = 0.15f;\n    \n    /**\n     * Protocol version currenlty used for serialization.\n     */\n    static const slz_size_type SERIALIZATION_PROTOCOL_VERSION = 1;\n    \n    /**\n     * Return an always valid pointer to an static empty bucket_entry with last_bucket() == true.\n     */            \n    bucket_entry* static_empty_bucket_ptr() {\n        static bucket_entry empty_bucket;\n        return &empty_bucket;\n    }\n    \nprivate:\n    buckets_container_type m_buckets_data;\n    \n    /**\n     * Points to m_buckets_data.data() if !m_buckets_data.empty() otherwise points to static_empty_bucket_ptr.\n     * This variable is useful to avoid the cost of checking if m_buckets_data is empty when trying \n     * to find an element.\n     * \n     * TODO Remove m_buckets_data and only use a pointer+size instead of a pointer+vector to save 
some space in the ordered_hash object.\n     */\n    bucket_entry* m_buckets;\n    \n    size_type m_mask;\n    \n    values_container_type m_values;\n    \n    bool m_grow_on_next_insert;\n    float m_max_load_factor;\n    size_type m_load_threshold;\n};\n\n\n} // end namespace detail_ordered_hash\n\n} // end namespace tsl\n\n#endif\n"
  },
  {
    "path": "extern/tsl-ordered-map/ordered_map.h",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Tessil\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#ifndef TSL_ORDERED_MAP_H\n#define TSL_ORDERED_MAP_H\n\n\n#include <cstddef>\n#include <cstdint>\n#include <deque>\n#include <functional>\n#include <initializer_list>\n#include <memory>\n#include <type_traits>\n#include <utility>\n#include <vector>\n#include \"ordered_hash.h\"\n\n\nnamespace tsl {\n\n\n/**\n * Implementation of an hash map using open adressing with robin hood with backshift delete to resolve collisions.\n * \n * The particularity of this hash map is that it remembers the order in which the elements were added and\n * provide a way to access the structure which stores these values through the 'values_container()' method. \n * The used container is defined by ValueTypeContainer, by default a std::deque is used (grows faster) but\n * a std::vector may be used. 
In this case the map provides a 'data()' method which give a direct access \n * to the memory used to store the values (which can be usefull to communicate with C API's).\n * \n * The Key and T must be copy constructible and/or move constructible. To use `unordered_erase` they both\n * must be swappable.\n * \n * The behaviour of the hash map is undefinded if the destructor of Key or T throws an exception.\n * \n * By default the maximum size of a map is limited to 2^32 - 1 values, if needed this can be changed through\n * the IndexType template parameter. Using an `uint64_t` will raise this limit to 2^64 - 1 values but each\n * bucket will use 16 bytes instead of 8 bytes in addition to the space needed to store the values.\n * \n * Iterators invalidation:\n *  - clear, operator=, reserve, rehash: always invalidate the iterators (also invalidate end()).\n *  - insert, emplace, emplace_hint, operator[]: when a std::vector is used as ValueTypeContainer \n *                                               and if size() < capacity(), only end(). \n *                                               Otherwise all the iterators are invalidated if an insert occurs.\n *  - erase, unordered_erase: when a std::vector is used as ValueTypeContainer invalidate the iterator of \n *                            the erased element and all the ones after the erased element (including end()). 
\n *                            Otherwise all the iterators are invalidated if an erase occurs.\n */\ntemplate<class Key, \n         class T, \n         class Hash = std::hash<Key>,\n         class KeyEqual = std::equal_to<Key>,\n         class Allocator = std::allocator<std::pair<Key, T>>,\n         class ValueTypeContainer = std::deque<std::pair<Key, T>, Allocator>,\n         class IndexType = std::uint_least32_t>\nclass ordered_map {\nprivate:\n    template<typename U>\n    using has_is_transparent = tsl::detail_ordered_hash::has_is_transparent<U>;\n    \n    class KeySelect {\n    public:\n        using key_type = Key;\n        \n        const key_type& operator()(const std::pair<Key, T>& key_value) const noexcept {\n            return key_value.first;\n        }\n        \n        key_type& operator()(std::pair<Key, T>& key_value) noexcept {\n            return key_value.first;\n        }\n    };  \n    \n    class ValueSelect {\n    public:\n        using value_type = T;\n        \n        const value_type& operator()(const std::pair<Key, T>& key_value) const noexcept {\n            return key_value.second;\n        }\n        \n        value_type& operator()(std::pair<Key, T>& key_value) noexcept {\n            return key_value.second;\n        }\n    };\n    \n    using ht = detail_ordered_hash::ordered_hash<std::pair<Key, T>, KeySelect, ValueSelect,\n                                                 Hash, KeyEqual, Allocator, ValueTypeContainer, IndexType>;\n    \npublic:\n    using key_type = typename ht::key_type;\n    using mapped_type = T;\n    using value_type = typename ht::value_type;\n    using size_type = typename ht::size_type;\n    using difference_type = typename ht::difference_type;\n    using hasher = typename ht::hasher;\n    using key_equal = typename ht::key_equal;\n    using allocator_type = typename ht::allocator_type;\n    using reference = typename ht::reference;\n    using const_reference = typename ht::const_reference;\n    using 
pointer = typename ht::pointer;\n    using const_pointer = typename ht::const_pointer;\n    using iterator = typename ht::iterator;\n    using const_iterator = typename ht::const_iterator;\n    using reverse_iterator = typename ht::reverse_iterator;\n    using const_reverse_iterator = typename ht::const_reverse_iterator;\n    \n    using values_container_type = typename ht::values_container_type;\n    \n    \n    /*\n     * Constructors\n     */\n    ordered_map(): ordered_map(ht::DEFAULT_INIT_BUCKETS_SIZE) {\n    }\n    \n    explicit ordered_map(size_type bucket_count, \n                         const Hash& hash = Hash(),\n                         const KeyEqual& equal = KeyEqual(),\n                         const Allocator& alloc = Allocator()): \n                     m_ht(bucket_count, hash, equal, alloc, ht::DEFAULT_MAX_LOAD_FACTOR)\n    {\n    }\n    \n    ordered_map(size_type bucket_count,\n                const Allocator& alloc): ordered_map(bucket_count, Hash(), KeyEqual(), alloc)\n    {\n    }\n    \n    ordered_map(size_type bucket_count,\n                const Hash& hash,\n                const Allocator& alloc): ordered_map(bucket_count, hash, KeyEqual(), alloc)\n    {\n    }\n    \n    explicit ordered_map(const Allocator& alloc): ordered_map(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) {\n    }\n    \n    template<class InputIt>\n    ordered_map(InputIt first, InputIt last,\n                size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,\n                const Hash& hash = Hash(),\n                const KeyEqual& equal = KeyEqual(),\n                const Allocator& alloc = Allocator()): ordered_map(bucket_count, hash, equal, alloc)\n    {\n        insert(first, last);\n    }\n    \n    template<class InputIt>\n    ordered_map(InputIt first, InputIt last,\n                size_type bucket_count,\n                const Allocator& alloc): ordered_map(first, last, bucket_count, Hash(), KeyEqual(), alloc)\n    {\n    }\n    \n    template<class 
InputIt>\n    ordered_map(InputIt first, InputIt last,\n                size_type bucket_count,\n                const Hash& hash,\n                const Allocator& alloc): ordered_map(first, last, bucket_count, hash, KeyEqual(), alloc)\n    {\n    }\n\n    ordered_map(std::initializer_list<value_type> init,\n                size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,\n                const Hash& hash = Hash(),\n                const KeyEqual& equal = KeyEqual(),\n                const Allocator& alloc = Allocator()): \n            ordered_map(init.begin(), init.end(), bucket_count, hash, equal, alloc)\n    {\n    }\n\n    ordered_map(std::initializer_list<value_type> init,\n                size_type bucket_count,\n                const Allocator& alloc): \n            ordered_map(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), alloc)\n    {\n    }\n\n    ordered_map(std::initializer_list<value_type> init,\n                size_type bucket_count,\n                const Hash& hash,\n                const Allocator& alloc): \n            ordered_map(init.begin(), init.end(), bucket_count, hash, KeyEqual(), alloc)\n    {\n    }\n\n    \n    ordered_map& operator=(std::initializer_list<value_type> ilist) {\n        m_ht.clear();\n        \n        m_ht.reserve(ilist.size());\n        m_ht.insert(ilist.begin(), ilist.end());\n        \n        return *this;\n    }\n    \n    allocator_type get_allocator() const { return m_ht.get_allocator(); }\n    \n\n    \n    /*\n     * Iterators\n     */\n    iterator begin() noexcept { return m_ht.begin(); }\n    const_iterator begin() const noexcept { return m_ht.begin(); }\n    const_iterator cbegin() const noexcept { return m_ht.cbegin(); }\n    \n    iterator end() noexcept { return m_ht.end(); }\n    const_iterator end() const noexcept { return m_ht.end(); }\n    const_iterator cend() const noexcept { return m_ht.cend(); }\n    \n    reverse_iterator rbegin() noexcept { return m_ht.rbegin(); }\n    
const_reverse_iterator rbegin() const noexcept { return m_ht.rbegin(); }\n    const_reverse_iterator rcbegin() const noexcept { return m_ht.rcbegin(); }\n    \n    reverse_iterator rend() noexcept { return m_ht.rend(); }\n    const_reverse_iterator rend() const noexcept { return m_ht.rend(); }\n    const_reverse_iterator rcend() const noexcept { return m_ht.rcend(); }\n    \n    \n    /*\n     * Capacity\n     */\n    bool empty() const noexcept { return m_ht.empty(); }\n    size_type size() const noexcept { return m_ht.size(); }\n    size_type max_size() const noexcept { return m_ht.max_size(); }\n    \n    /*\n     * Modifiers\n     */\n    void clear() noexcept { m_ht.clear(); }\n    \n    \n    \n    std::pair<iterator, bool> insert(const value_type& value) { return m_ht.insert(value); }\n        \n    template<class P, typename std::enable_if<std::is_constructible<value_type, P&&>::value>::type* = nullptr>\n    std::pair<iterator, bool> insert(P&& value) { return m_ht.emplace(std::forward<P>(value)); }\n    \n    std::pair<iterator, bool> insert(value_type&& value) { return m_ht.insert(std::move(value)); }\n    \n    \n    iterator insert(const_iterator hint, const value_type& value) {\n        return m_ht.insert_hint(hint, value);\n    }\n        \n    template<class P, typename std::enable_if<std::is_constructible<value_type, P&&>::value>::type* = nullptr>\n    iterator insert(const_iterator hint, P&& value) {\n        return m_ht.emplace_hint(hint, std::forward<P>(value));\n    }\n    \n    iterator insert(const_iterator hint, value_type&& value) { \n        return m_ht.insert_hint(hint, std::move(value));\n    }\n    \n    \n    template<class InputIt>\n    void insert(InputIt first, InputIt last) { m_ht.insert(first, last); }\n    void insert(std::initializer_list<value_type> ilist) { m_ht.insert(ilist.begin(), ilist.end()); }\n\n    \n    \n    \n    template<class M>\n    std::pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj) { \n        
return m_ht.insert_or_assign(k, std::forward<M>(obj)); \n    }\n\n    template<class M>\n    std::pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj) { \n        return m_ht.insert_or_assign(std::move(k), std::forward<M>(obj)); \n    }\n    \n    \n    template<class M>\n    iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj) {\n        return m_ht.insert_or_assign(hint, k, std::forward<M>(obj));\n    }\n    \n    template<class M>\n    iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj) {\n        return m_ht.insert_or_assign(hint, std::move(k), std::forward<M>(obj));\n    }\n    \n    /**\n     * Due to the way elements are stored, emplace will need to move or copy the key-value once.\n     * The method is equivalent to insert(value_type(std::forward<Args>(args)...));\n     * \n     * Mainly here for compatibility with the std::unordered_map interface.\n     */\n    template<class... Args>\n    std::pair<iterator, bool> emplace(Args&&... args) { return m_ht.emplace(std::forward<Args>(args)...); }\n    \n    /**\n     * Due to the way elements are stored, emplace_hint will need to move or copy the key-value once.\n     * The method is equivalent to insert(hint, value_type(std::forward<Args>(args)...));\n     * \n     * Mainly here for compatibility with the std::unordered_map interface.\n     */\n    template <class... Args>\n    iterator emplace_hint(const_iterator hint, Args&&... args) {\n        return m_ht.emplace_hint(hint, std::forward<Args>(args)...);\n    }\n    \n    \n    \n    \n    template<class... Args>\n    std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args) { \n        return m_ht.try_emplace(k, std::forward<Args>(args)...);\n    }\n    \n    template<class... Args>\n    std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args) {\n        return m_ht.try_emplace(std::move(k), std::forward<Args>(args)...);\n    }\n    \n    template<class... 
Args>\n    iterator try_emplace(const_iterator hint, const key_type& k, Args&&... args) {\n        return m_ht.try_emplace_hint(hint, k, std::forward<Args>(args)...);\n    }\n    \n    template<class... Args>\n    iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args) {\n        return m_ht.try_emplace_hint(hint, std::move(k), std::forward<Args>(args)...);\n    }\n    \n    \n    \n\n    /**\n     * When erasing an element, the insert order will be preserved and no holes will be present in the container\n     * returned by 'values_container()'. \n     * \n     * The method is in O(n), if the order is not important 'unordered_erase(...)' method is faster with an O(1)\n     * average complexity.\n     */\n    iterator erase(iterator pos) { return m_ht.erase(pos); }\n    \n    /**\n     * @copydoc erase(iterator pos)\n     */\n    iterator erase(const_iterator pos) { return m_ht.erase(pos); }\n    \n    /**\n     * @copydoc erase(iterator pos)\n     */    \n    iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); }\n    \n    /**\n     * @copydoc erase(iterator pos)\n     */    \n    size_type erase(const key_type& key) { return m_ht.erase(key); }\n    \n    /**\n     * @copydoc erase(iterator pos)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup to the value if you already have the hash.\n     */    \n    size_type erase(const key_type& key, std::size_t precalculated_hash) { \n        return m_ht.erase(key, precalculated_hash); \n    }\n    \n    /**\n     * @copydoc erase(iterator pos)\n     * \n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. 
\n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type erase(const K& key) { return m_ht.erase(key); }\n    \n    /**\n     * @copydoc erase(const key_type& key, std::size_t precalculated_hash)\n     * \n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type erase(const K& key, std::size_t precalculated_hash) { \n        return m_ht.erase(key, precalculated_hash); \n    }\n    \n    \n    \n    void swap(ordered_map& other) { other.m_ht.swap(m_ht); }\n    \n    /*\n     * Lookup\n     */\n    T& at(const Key& key) { return m_ht.at(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */\n    T& at(const Key& key, std::size_t precalculated_hash) { return m_ht.at(key, precalculated_hash); }\n    \n    \n    const T& at(const Key& key) const { return m_ht.at(key); }\n    \n    /**\n     * @copydoc at(const Key& key, std::size_t precalculated_hash)\n     */\n    const T& at(const Key& key, std::size_t precalculated_hash) const { return m_ht.at(key, precalculated_hash); }\n    \n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. 
\n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    T& at(const K& key) { return m_ht.at(key); }\n    \n    /**\n     * @copydoc at(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */    \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    T& at(const K& key, std::size_t precalculated_hash) { return m_ht.at(key, precalculated_hash); }\n    \n    /**\n     * @copydoc at(const K& key)\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>     \n    const T& at(const K& key) const { return m_ht.at(key); }\n    \n    /**\n     * @copydoc at(const K& key, std::size_t precalculated_hash)\n     */    \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    const T& at(const K& key, std::size_t precalculated_hash) const { return m_ht.at(key, precalculated_hash); }\n    \n    \n    \n    T& operator[](const Key& key) { return m_ht[key]; }    \n    T& operator[](Key&& key) { return m_ht[std::move(key)]; }\n    \n    \n    \n    size_type count(const Key& key) const { return m_ht.count(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Usefull to speed-up the lookup if you already have the hash.\n     */\n    size_type count(const Key& key, std::size_t precalculated_hash) const { \n        return m_ht.count(key, precalculated_hash); \n    }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>     \n    size_type count(const K& key) const { return m_ht.count(key); }\n    \n    /**\n     * @copydoc count(const K& key) const\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */     \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type count(const K& key, std::size_t precalculated_hash) const { \n        return m_ht.count(key, precalculated_hash);\n    }\n    \n    \n    \n    iterator find(const Key& key) { return m_ht.find(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Usefull to speed-up the lookup if you already have the hash.\n     */\n    iterator find(const Key& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); }\n    \n    const_iterator find(const Key& key) const { return m_ht.find(key); }\n    \n    /**\n     * @copydoc find(const Key& key, std::size_t precalculated_hash)\n     */\n    const_iterator find(const Key& key, std::size_t precalculated_hash) const { \n        return m_ht.find(key, precalculated_hash);\n    }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    iterator find(const K& key) { return m_ht.find(key); }\n    \n    /**\n     * @copydoc find(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    iterator find(const K& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); }\n    \n    /**\n     * @copydoc find(const K& key)\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    const_iterator find(const K& key) const { return m_ht.find(key); }\n    \n    /**\n     * @copydoc find(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Usefull to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    const_iterator find(const K& key, std::size_t precalculated_hash) const { \n        return m_ht.find(key, precalculated_hash); \n    }\n    \n    \n    \n    std::pair<iterator, iterator> equal_range(const Key& key) { return m_ht.equal_range(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */\n    std::pair<iterator, iterator> equal_range(const Key& key, std::size_t precalculated_hash) { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    std::pair<const_iterator, const_iterator> equal_range(const Key& key) const { return m_ht.equal_range(key); }\n    \n    /**\n     * @copydoc equal_range(const Key& key, std::size_t precalculated_hash)\n     */\n    std::pair<const_iterator, const_iterator> equal_range(const Key& key, std::size_t precalculated_hash) const { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n\n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>     \n    std::pair<iterator, iterator> equal_range(const K& key) { return m_ht.equal_range(key); }\n    \n    /**\n     * @copydoc equal_range(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Usefull to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    std::pair<iterator, iterator> equal_range(const K& key, std::size_t precalculated_hash) { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    /**\n     * @copydoc equal_range(const K& key)\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>     \n    std::pair<const_iterator, const_iterator> equal_range(const K& key) const { return m_ht.equal_range(key); }\n    \n    /**\n     * @copydoc equal_range(const K& key, std::size_t precalculated_hash)\n     */    \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    std::pair<const_iterator, const_iterator> equal_range(const K& key, std::size_t precalculated_hash) const { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    \n    \n    /*\n     * Bucket interface \n     */\n    size_type bucket_count() const { return m_ht.bucket_count(); }\n    size_type max_bucket_count() const { return m_ht.max_bucket_count(); }\n    \n    \n    /*\n     * Hash policy \n     */\n    float load_factor() const { return m_ht.load_factor(); }\n    float max_load_factor() const { return m_ht.max_load_factor(); }\n    void max_load_factor(float ml) { m_ht.max_load_factor(ml); }\n    \n    void rehash(size_type count) { m_ht.rehash(count); }\n    void reserve(size_type count) { m_ht.reserve(count); }\n    \n    \n    /*\n     * Observers\n     */\n    hasher hash_function() const { return m_ht.hash_function(); }\n    key_equal key_eq() const { return m_ht.key_eq(); }\n    \n    \n    \n    /*\n     * Other\n     */\n    \n    /**\n     * Convert a const_iterator to an iterator.\n     */\n    iterator mutable_iterator(const_iterator pos) {\n        return 
m_ht.mutable_iterator(pos);\n    }\n    \n    /**\n     * Requires index <= size().\n     * \n     * Return an iterator to the element at index. Return end() if index == size().\n     */\n    iterator nth(size_type index) { return m_ht.nth(index); }\n    \n    /**\n     * @copydoc nth(size_type index)\n     */\n    const_iterator nth(size_type index) const { return m_ht.nth(index); }\n    \n    \n    /**\n     * Return const_reference to the first element. Requires the container to not be empty.\n     */\n    const_reference front() const { return m_ht.front(); }\n    \n    /**\n     * Return const_reference to the last element. Requires the container to not be empty.\n     */\n    const_reference back() const { return m_ht.back(); }\n    \n    \n    /**\n     * Only available if ValueTypeContainer is a std::vector. Same as calling 'values_container().data()'.\n     */\n    template<class U = values_container_type, typename std::enable_if<tsl::detail_ordered_hash::is_vector<U>::value>::type* = nullptr>    \n    const typename values_container_type::value_type* data() const noexcept { return m_ht.data(); }\n        \n    /**\n     * Return the container in which the values are stored. 
The values are in the same order as the insertion order\n     * and are contiguous in the structure, no holes (size() == values_container().size()).\n     */\n    const values_container_type& values_container() const noexcept { return m_ht.values_container(); }\n\n    template<class U = values_container_type, typename std::enable_if<tsl::detail_ordered_hash::is_vector<U>::value>::type* = nullptr>    \n    size_type capacity() const noexcept { return m_ht.capacity(); }\n    \n    void shrink_to_fit() { m_ht.shrink_to_fit(); }\n    \n    \n    \n    /**\n     * Insert the value before pos shifting all the elements on the right of pos (including pos) one position \n     * to the right.\n     * \n     * Amortized linear time-complexity in the distance between pos and end().\n     */\n    std::pair<iterator, bool> insert_at_position(const_iterator pos, const value_type& value) { \n        return m_ht.insert_at_position(pos, value); \n    }\n    \n    /**\n     * @copydoc insert_at_position(const_iterator pos, const value_type& value)\n     */\n    std::pair<iterator, bool> insert_at_position(const_iterator pos, value_type&& value) { \n        return m_ht.insert_at_position(pos, std::move(value)); \n    }\n    \n    /**\n     * @copydoc insert_at_position(const_iterator pos, const value_type& value)\n     * \n     * Same as insert_at_position(pos, value_type(std::forward<Args>(args)...), mainly\n     * here for coherence.\n     */\n    template<class... Args>\n    std::pair<iterator, bool> emplace_at_position(const_iterator pos, Args&&... args) {\n        return m_ht.emplace_at_position(pos, std::forward<Args>(args)...); \n    }\n       \n    /**\n     * @copydoc insert_at_position(const_iterator pos, const value_type& value)\n     */       \n    template<class... Args>\n    std::pair<iterator, bool> try_emplace_at_position(const_iterator pos, const key_type& k, Args&&... 
args) { \n        return m_ht.try_emplace_at_position(pos, k, std::forward<Args>(args)...);\n    }\n    \n    /**\n     * @copydoc insert_at_position(const_iterator pos, const value_type& value)\n     */    \n    template<class... Args>\n    std::pair<iterator, bool> try_emplace_at_position(const_iterator pos, key_type&& k, Args&&... args) {\n        return m_ht.try_emplace_at_position(pos, std::move(k), std::forward<Args>(args)...);\n    }\n    \n    \n    \n    void pop_back() { m_ht.pop_back(); }\n    \n    /**\n     * Faster erase operation with an O(1) average complexity but it doesn't preserve the insertion order.\n     * \n     * If an erasure occurs, the last element of the map will take the place of the erased element.\n     */\n    iterator unordered_erase(iterator pos) { return m_ht.unordered_erase(pos); }\n    \n    /**\n     * @copydoc unordered_erase(iterator pos)\n     */\n    iterator unordered_erase(const_iterator pos) { return m_ht.unordered_erase(pos); }\n    \n    /**\n     * @copydoc unordered_erase(iterator pos)\n     */    \n    size_type unordered_erase(const key_type& key) { return m_ht.unordered_erase(key); }\n    \n    /**\n     * @copydoc unordered_erase(iterator pos)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */    \n    size_type unordered_erase(const key_type& key, std::size_t precalculated_hash) { \n        return m_ht.unordered_erase(key, precalculated_hash); \n    }\n    \n    /**\n     * @copydoc unordered_erase(iterator pos)\n     * \n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. 
\n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type unordered_erase(const K& key) { return m_ht.unordered_erase(key); }\n    \n    /**\n     * @copydoc unordered_erase(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type unordered_erase(const K& key, std::size_t precalculated_hash) { \n        return m_ht.unordered_erase(key, precalculated_hash); \n    }\n    \n    /**\n     * Serialize the map through the `serializer` parameter.\n     * \n     * The `serializer` parameter must be a function object that supports the following call:\n     *  - `template<typename U> void operator()(const U& value);` where the types `std::uint64_t`, `float` and `std::pair<Key, T>` must be supported for U.\n     * \n     * The implementation leaves binary compatibilty (endianness, IEEE 754 for floats, ...) 
of the types it serializes\n     * in the hands of the `Serializer` function object if compatibilty is required.\n     */\n    template<class Serializer>\n    void serialize(Serializer& serializer) const {\n        m_ht.serialize(serializer);\n    }\n\n    /**\n     * Deserialize a previouly serialized map through the `deserializer` parameter.\n     * \n     * The `deserializer` parameter must be a function object that supports the following calls:\n     *  - `template<typename U> U operator()();` where the types `std::uint64_t`, `float` and `std::pair<Key, T>` must be supported for U.\n     * \n     * If the deserialized hash map type is hash compatible with the serialized map, the deserialization process can be\n     * sped up by setting `hash_compatible` to true. To be hash compatible, the Hash and KeyEqual must behave the same way \n     * than the ones used on the serialized map. The `std::size_t` must also be of the same size as the one on the platform used\n     * to serialize the map, the same apply for `IndexType`. If these criteria are not met, the behaviour is undefined with \n     * `hash_compatible` sets to true.\n     * \n     * The behaviour is undefined if the type `Key` and `T` of the `ordered_map` are not the same as the\n     * types used during serialization.\n     * \n     * The implementation leaves binary compatibilty (endianness, IEEE 754 for floats, size of int, ...) 
of the types it \n     * deserializes in the hands of the `Deserializer` function object if compatibilty is required.\n     */\n    template<class Deserializer>\n    static ordered_map deserialize(Deserializer& deserializer, bool hash_compatible = false) {\n        ordered_map map(0);\n        map.m_ht.deserialize(deserializer, hash_compatible);\n\n        return map;\n    }\n    \n    \n    \n    friend bool operator==(const ordered_map& lhs, const ordered_map& rhs) { return lhs.m_ht == rhs.m_ht; }\n    friend bool operator!=(const ordered_map& lhs, const ordered_map& rhs) { return lhs.m_ht != rhs.m_ht; }\n    friend bool operator<(const ordered_map& lhs, const ordered_map& rhs) { return lhs.m_ht < rhs.m_ht; }\n    friend bool operator<=(const ordered_map& lhs, const ordered_map& rhs) { return lhs.m_ht <= rhs.m_ht; }\n    friend bool operator>(const ordered_map& lhs, const ordered_map& rhs) { return lhs.m_ht > rhs.m_ht; }\n    friend bool operator>=(const ordered_map& lhs, const ordered_map& rhs) { return lhs.m_ht >= rhs.m_ht; }\n    \n    friend void swap(ordered_map& lhs, ordered_map& rhs) { lhs.swap(rhs); }\n\nprivate:\n    ht m_ht;\n};\n\n} // end namespace tsl\n\n#endif\n"
  },
  {
    "path": "extern/tsl-ordered-map/ordered_set.h",
    "content": "/**\n * MIT License\n * \n * Copyright (c) 2017 Tessil\n * \n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n * \n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n * \n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n */\n#ifndef TSL_ORDERED_SET_H\n#define TSL_ORDERED_SET_H\n\n\n#include <cstddef>\n#include <cstdint>\n#include <deque>\n#include <functional>\n#include <initializer_list>\n#include <memory>\n#include <type_traits>\n#include <utility>\n#include <vector>\n#include \"ordered_hash.h\"\n\n\nnamespace tsl {\n\n\n/**\n * Implementation of an hash set using open adressing with robin hood with backshift delete to resolve collisions.\n * \n * The particularity of this hash set is that it remembers the order in which the elements were added and\n * provide a way to access the structure which stores these values through the 'values_container()' method. \n * The used container is defined by ValueTypeContainer, by default a std::deque is used (grows faster) but\n * a std::vector may be used. 
In this case the set provides a 'data()' method which give a direct access \n * to the memory used to store the values (which can be usefull to communicate with C API's).\n * \n * The Key must be copy constructible and/or move constructible. To use `unordered_erase` it also must be swappable.\n * \n * The behaviour of the hash set is undefinded if the destructor of Key throws an exception.\n * \n * By default the maximum size of a set is limited to 2^32 - 1 values, if needed this can be changed through\n * the IndexType template parameter. Using an `uint64_t` will raise this limit to 2^64 - 1 values but each\n * bucket will use 16 bytes instead of 8 bytes in addition to the space needed to store the values.\n * \n * Iterators invalidation:\n *  - clear, operator=, reserve, rehash: always invalidate the iterators (also invalidate end()).\n *  - insert, emplace, emplace_hint, operator[]: when a std::vector is used as ValueTypeContainer \n *                                               and if size() < capacity(), only end(). \n *                                               Otherwise all the iterators are invalidated if an insert occurs.\n *  - erase, unordered_erase: when a std::vector is used as ValueTypeContainer invalidate the iterator of \n *                            the erased element and all the ones after the erased element (including end()). 
\n *                            Otherwise all the iterators are invalidated if an erase occurs.\n */\ntemplate<class Key, \n         class Hash = std::hash<Key>,\n         class KeyEqual = std::equal_to<Key>,\n         class Allocator = std::allocator<Key>,\n         class ValueTypeContainer = std::deque<Key, Allocator>,\n         class IndexType = std::uint_least32_t>\nclass ordered_set {\nprivate:\n    template<typename U>\n    using has_is_transparent = tsl::detail_ordered_hash::has_is_transparent<U>;\n    \n    class KeySelect {\n    public:\n        using key_type = Key;\n        \n        const key_type& operator()(const Key& key) const noexcept {\n            return key;\n        }\n        \n        key_type& operator()(Key& key) noexcept {\n            return key;\n        }\n    };\n    \n    using ht = detail_ordered_hash::ordered_hash<Key, KeySelect, void,\n                                                 Hash, KeyEqual, Allocator, ValueTypeContainer, IndexType>;\n            \npublic:\n    using key_type = typename ht::key_type;\n    using value_type = typename ht::value_type;\n    using size_type = typename ht::size_type;\n    using difference_type = typename ht::difference_type;\n    using hasher = typename ht::hasher;\n    using key_equal = typename ht::key_equal;\n    using allocator_type = typename ht::allocator_type;\n    using reference = typename ht::reference;\n    using const_reference = typename ht::const_reference;\n    using pointer = typename ht::pointer;\n    using const_pointer = typename ht::const_pointer;\n    using iterator = typename ht::iterator;\n    using const_iterator = typename ht::const_iterator;\n    using reverse_iterator = typename ht::reverse_iterator;\n    using const_reverse_iterator = typename ht::const_reverse_iterator;\n    \n    using values_container_type = typename ht::values_container_type;\n\n    \n    /*\n     * Constructors\n     */\n    ordered_set(): ordered_set(ht::DEFAULT_INIT_BUCKETS_SIZE) {\n    }\n    
\n    explicit ordered_set(size_type bucket_count, \n                         const Hash& hash = Hash(),\n                         const KeyEqual& equal = KeyEqual(),\n                         const Allocator& alloc = Allocator()): \n                        m_ht(bucket_count, hash, equal, alloc, ht::DEFAULT_MAX_LOAD_FACTOR)\n    {\n    }\n    \n    ordered_set(size_type bucket_count,\n                const Allocator& alloc): ordered_set(bucket_count, Hash(), KeyEqual(), alloc)\n    {\n    }\n    \n    ordered_set(size_type bucket_count,\n                const Hash& hash,\n                const Allocator& alloc): ordered_set(bucket_count, hash, KeyEqual(), alloc)\n    {\n    }\n    \n    explicit ordered_set(const Allocator& alloc): ordered_set(ht::DEFAULT_INIT_BUCKETS_SIZE, alloc) {\n    }\n    \n    template<class InputIt>\n    ordered_set(InputIt first, InputIt last,\n                size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,\n                const Hash& hash = Hash(),\n                const KeyEqual& equal = KeyEqual(),\n                const Allocator& alloc = Allocator()): ordered_set(bucket_count, hash, equal, alloc)\n    {\n        insert(first, last);\n    }\n    \n    template<class InputIt>\n    ordered_set(InputIt first, InputIt last,\n                size_type bucket_count,\n                const Allocator& alloc): ordered_set(first, last, bucket_count, Hash(), KeyEqual(), alloc)\n    {\n    }\n    \n    template<class InputIt>\n    ordered_set(InputIt first, InputIt last,\n                size_type bucket_count,\n                const Hash& hash,\n                const Allocator& alloc): ordered_set(first, last, bucket_count, hash, KeyEqual(), alloc)\n    {\n    }\n\n    ordered_set(std::initializer_list<value_type> init,\n                size_type bucket_count = ht::DEFAULT_INIT_BUCKETS_SIZE,\n                const Hash& hash = Hash(),\n                const KeyEqual& equal = KeyEqual(),\n                const Allocator& alloc = 
Allocator()): \n            ordered_set(init.begin(), init.end(), bucket_count, hash, equal, alloc)\n    {\n    }\n\n    ordered_set(std::initializer_list<value_type> init,\n                size_type bucket_count,\n                const Allocator& alloc): \n            ordered_set(init.begin(), init.end(), bucket_count, Hash(), KeyEqual(), alloc)\n    {\n    }\n\n    ordered_set(std::initializer_list<value_type> init,\n                size_type bucket_count,\n                const Hash& hash,\n                const Allocator& alloc): \n            ordered_set(init.begin(), init.end(), bucket_count, hash, KeyEqual(), alloc)\n    {\n    }\n\n    \n    ordered_set& operator=(std::initializer_list<value_type> ilist) {\n        m_ht.clear();\n        \n        m_ht.reserve(ilist.size());\n        m_ht.insert(ilist.begin(), ilist.end());\n        \n        return *this;\n    }\n    \n    allocator_type get_allocator() const { return m_ht.get_allocator(); }\n    \n    \n    /*\n     * Iterators\n     */\n    iterator begin() noexcept { return m_ht.begin(); }\n    const_iterator begin() const noexcept { return m_ht.begin(); }\n    const_iterator cbegin() const noexcept { return m_ht.cbegin(); }\n    \n    iterator end() noexcept { return m_ht.end(); }\n    const_iterator end() const noexcept { return m_ht.end(); }\n    const_iterator cend() const noexcept { return m_ht.cend(); }\n    \n    reverse_iterator rbegin() noexcept { return m_ht.rbegin(); }\n    const_reverse_iterator rbegin() const noexcept { return m_ht.rbegin(); }\n    const_reverse_iterator rcbegin() const noexcept { return m_ht.rcbegin(); }\n    \n    reverse_iterator rend() noexcept { return m_ht.rend(); }\n    const_reverse_iterator rend() const noexcept { return m_ht.rend(); }\n    const_reverse_iterator rcend() const noexcept { return m_ht.rcend(); }\n    \n    \n    /*\n     * Capacity\n     */\n    bool empty() const noexcept { return m_ht.empty(); }\n    size_type size() const noexcept { return 
m_ht.size(); }\n    size_type max_size() const noexcept { return m_ht.max_size(); }\n    \n    /*\n     * Modifiers\n     */\n    void clear() noexcept { m_ht.clear(); }\n    \n    \n    \n    std::pair<iterator, bool> insert(const value_type& value) { return m_ht.insert(value); }\n    std::pair<iterator, bool> insert(value_type&& value) { return m_ht.insert(std::move(value)); }\n    \n    iterator insert(const_iterator hint, const value_type& value) {\n        return m_ht.insert_hint(hint, value); \n    }\n    \n    iterator insert(const_iterator hint, value_type&& value) { \n        return m_ht.insert_hint(hint, std::move(value)); \n    }\n    \n    template<class InputIt>\n    void insert(InputIt first, InputIt last) { m_ht.insert(first, last); }\n    void insert(std::initializer_list<value_type> ilist) { m_ht.insert(ilist.begin(), ilist.end()); }\n\n    \n    \n    /**\n     * Due to the way elements are stored, emplace will need to move or copy the key-value once.\n     * The method is equivalent to insert(value_type(std::forward<Args>(args)...));\n     * \n     * Mainly here for compatibility with the std::unordered_map interface.\n     */\n    template<class... Args>\n    std::pair<iterator, bool> emplace(Args&&... args) { return m_ht.emplace(std::forward<Args>(args)...); }\n    \n    /**\n     * Due to the way elements are stored, emplace_hint will need to move or copy the key-value once.\n     * The method is equivalent to insert(hint, value_type(std::forward<Args>(args)...));\n     * \n     * Mainly here for compatibility with the std::unordered_map interface.\n     */\n    template<class... Args>\n    iterator emplace_hint(const_iterator hint, Args&&... args) {\n        return m_ht.emplace_hint(hint, std::forward<Args>(args)...); \n    }\n\n    /**\n     * When erasing an element, the insert order will be preserved and no holes will be present in the container\n     * returned by 'values_container()'. 
\n     * \n     * The method is in O(n), if the order is not important 'unordered_erase(...)' method is faster with an O(1)\n     * average complexity.\n     */    \n    iterator erase(iterator pos) { return m_ht.erase(pos); }\n    \n    /**\n     * @copydoc erase(iterator pos)\n     */    \n    iterator erase(const_iterator pos) { return m_ht.erase(pos); }\n    \n    /**\n     * @copydoc erase(iterator pos)\n     */    \n    iterator erase(const_iterator first, const_iterator last) { return m_ht.erase(first, last); }\n    \n    /**\n     * @copydoc erase(iterator pos)\n     */    \n    size_type erase(const key_type& key) { return m_ht.erase(key); }\n    \n    /**\n     * @copydoc erase(iterator pos)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup to the value if you already have the hash.\n     */    \n    size_type erase(const key_type& key, std::size_t precalculated_hash) { \n        return m_ht.erase(key, precalculated_hash); \n    }\n    \n    /**\n     * @copydoc erase(iterator pos)\n     * \n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type erase(const K& key) { return m_ht.erase(key); }\n    \n    /**\n     * @copydoc erase(const key_type& key, std::size_t precalculated_hash)\n     * \n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. 
\n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type erase(const K& key, std::size_t precalculated_hash) { \n        return m_ht.erase(key, precalculated_hash); \n    }\n    \n    \n    \n    void swap(ordered_set& other) { other.m_ht.swap(m_ht); }\n    \n    /*\n     * Lookup\n     */\n    size_type count(const Key& key) const { return m_ht.count(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */\n    size_type count(const Key& key, std::size_t precalculated_hash) const { \n        return m_ht.count(key, precalculated_hash); \n    }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>\n    size_type count(const K& key) const { return m_ht.count(key); }\n    \n    /**\n     * @copydoc count(const K& key) const\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */     \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type count(const K& key, std::size_t precalculated_hash) const { \n        return m_ht.count(key, precalculated_hash);\n    }\n    \n    \n    \n    \n    iterator find(const Key& key) { return m_ht.find(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. 
The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */\n    iterator find(const Key& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); }\n    \n    const_iterator find(const Key& key) const { return m_ht.find(key); }\n    \n    /**\n     * @copydoc find(const Key& key, std::size_t precalculated_hash)\n     */\n    const_iterator find(const Key& key, std::size_t precalculated_hash) const { \n        return m_ht.find(key, precalculated_hash);\n    }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>\n    iterator find(const K& key) { return m_ht.find(key); }\n    \n    /**\n     * @copydoc find(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    iterator find(const K& key, std::size_t precalculated_hash) { return m_ht.find(key, precalculated_hash); }\n    \n    /**\n     * @copydoc find(const K& key)\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>\n    const_iterator find(const K& key) const { return m_ht.find(key); }\n    \n    /**\n     * @copydoc find(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Usefull to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    const_iterator find(const K& key, std::size_t precalculated_hash) const { \n        return m_ht.find(key, precalculated_hash); \n    }\n    \n    \n    \n    std::pair<iterator, iterator> equal_range(const Key& key) { return m_ht.equal_range(key); }\n    \n    /**\n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */\n    std::pair<iterator, iterator> equal_range(const Key& key, std::size_t precalculated_hash) { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    std::pair<const_iterator, const_iterator> equal_range(const Key& key) const { return m_ht.equal_range(key); }\n    \n    /**\n     * @copydoc equal_range(const Key& key, std::size_t precalculated_hash)\n     */\n    std::pair<const_iterator, const_iterator> equal_range(const Key& key, std::size_t precalculated_hash) const { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    /**\n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>     \n    std::pair<iterator, iterator> equal_range(const K& key) { return m_ht.equal_range(key); }\n    \n    /**\n     * @copydoc equal_range(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Usefull to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    std::pair<iterator, iterator> equal_range(const K& key, std::size_t precalculated_hash) { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n    /**\n     * @copydoc equal_range(const K& key)\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr>     \n    std::pair<const_iterator, const_iterator> equal_range(const K& key) const { return m_ht.equal_range(key); }\n    \n    /**\n     * @copydoc equal_range(const K& key, std::size_t precalculated_hash)\n     */    \n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    std::pair<const_iterator, const_iterator> equal_range(const K& key, std::size_t precalculated_hash) const { \n        return m_ht.equal_range(key, precalculated_hash); \n    }\n    \n\n    /*\n     * Bucket interface \n     */\n    size_type bucket_count() const { return m_ht.bucket_count(); }\n    size_type max_bucket_count() const { return m_ht.max_bucket_count(); }\n    \n    \n    /*\n     *  Hash policy \n     */\n    float load_factor() const { return m_ht.load_factor(); }\n    float max_load_factor() const { return m_ht.max_load_factor(); }\n    void max_load_factor(float ml) { m_ht.max_load_factor(ml); }\n    \n    void rehash(size_type count) { m_ht.rehash(count); }\n    void reserve(size_type count) { m_ht.reserve(count); }\n    \n    \n    /*\n     * Observers\n     */\n    hasher hash_function() const { return m_ht.hash_function(); }\n    key_equal key_eq() const { return m_ht.key_eq(); }\n    \n    \n    /*\n     * Other\n     */\n    \n    /**\n     * Convert a const_iterator to an iterator.\n     */\n    iterator mutable_iterator(const_iterator pos) {\n        return 
m_ht.mutable_iterator(pos);\n    }\n    \n    /**\n     * Requires index <= size().\n     * \n     * Return an iterator to the element at index. Return end() if index == size().\n     */\n    iterator nth(size_type index) { return m_ht.nth(index); }\n    \n    /**\n     * @copydoc nth(size_type index)\n     */\n    const_iterator nth(size_type index) const { return m_ht.nth(index); }\n    \n    \n    /**\n     * Return const_reference to the first element. Requires the container to not be empty.\n     */\n    const_reference front() const { return m_ht.front(); }\n    \n    /**\n     * Return const_reference to the last element. Requires the container to not be empty.\n     */\n    const_reference back() const { return m_ht.back(); }\n    \n    \n    /**\n     * Only available if ValueTypeContainer is a std::vector. Same as calling 'values_container().data()'.\n     */ \n    template<class U = values_container_type, typename std::enable_if<tsl::detail_ordered_hash::is_vector<U>::value>::type* = nullptr>    \n    const typename values_container_type::value_type* data() const noexcept { return m_ht.data(); }\n    \n    /**\n     * Return the container in which the values are stored. 
The values are in the same order as the insertion order\n     * and are contiguous in the structure, no holes (size() == values_container().size()).\n     */        \n    const values_container_type& values_container() const noexcept { return m_ht.values_container(); }\n\n    template<class U = values_container_type, typename std::enable_if<tsl::detail_ordered_hash::is_vector<U>::value>::type* = nullptr>    \n    size_type capacity() const noexcept { return m_ht.capacity(); }\n    \n    void shrink_to_fit() { m_ht.shrink_to_fit(); }\n    \n    \n    \n    /**\n     * Insert the value before pos shifting all the elements on the right of pos (including pos) one position \n     * to the right.\n     * \n     * Amortized linear time-complexity in the distance between pos and end().\n     */\n    std::pair<iterator, bool> insert_at_position(const_iterator pos, const value_type& value) { \n        return m_ht.insert_at_position(pos, value); \n    }\n    \n    /**\n     * @copydoc insert_at_position(const_iterator pos, const value_type& value)\n     */\n    std::pair<iterator, bool> insert_at_position(const_iterator pos, value_type&& value) { \n        return m_ht.insert_at_position(pos, std::move(value)); \n    }\n    \n    /**\n     * @copydoc insert_at_position(const_iterator pos, const value_type& value)\n     * \n     * Same as insert_at_position(pos, value_type(std::forward<Args>(args)...), mainly\n     * here for coherence.\n     */\n    template<class... Args>\n    std::pair<iterator, bool> emplace_at_position(const_iterator pos, Args&&... 
args) {\n        return m_ht.emplace_at_position(pos, std::forward<Args>(args)...); \n    }\n    \n    \n    \n    void pop_back() { m_ht.pop_back(); }\n    \n    /**\n     * Faster erase operation with an O(1) average complexity but it doesn't preserve the insertion order.\n     * \n     * If an erasure occurs, the last element of the map will take the place of the erased element.\n     */    \n    iterator unordered_erase(iterator pos) { return m_ht.unordered_erase(pos); }\n    \n    /**\n     * @copydoc unordered_erase(iterator pos)\n     */    \n    iterator unordered_erase(const_iterator pos) { return m_ht.unordered_erase(pos); }\n    \n    /**\n     * @copydoc unordered_erase(iterator pos)\n     */    \n    size_type unordered_erase(const key_type& key) { return m_ht.unordered_erase(key); }\n    \n    /**\n     * @copydoc unordered_erase(iterator pos)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). Usefull to speed-up the lookup if you already have the hash.\n     */    \n    size_type unordered_erase(const key_type& key, std::size_t precalculated_hash) { \n        return m_ht.unordered_erase(key, precalculated_hash); \n    }\n    \n    /**\n     * @copydoc unordered_erase(iterator pos)\n     * \n     * This overload only participates in the overload resolution if the typedef KeyEqual::is_transparent exists. \n     * If so, K must be hashable and comparable to Key.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type unordered_erase(const K& key) { return m_ht.unordered_erase(key); }\n    \n    /**\n     * @copydoc unordered_erase(const K& key)\n     * \n     * Use the hash value 'precalculated_hash' instead of hashing the key. The hash value should be the same\n     * as hash_function()(key). 
Usefull to speed-up the lookup if you already have the hash.\n     */\n    template<class K, class KE = KeyEqual, typename std::enable_if<has_is_transparent<KE>::value>::type* = nullptr> \n    size_type unordered_erase(const K& key, std::size_t precalculated_hash) { \n        return m_ht.unordered_erase(key, precalculated_hash); \n    }\n    \n    /**\n     * Serialize the set through the `serializer` parameter.\n     * \n     * The `serializer` parameter must be a function object that supports the following call:\n     *  - `void operator()(const U& value);` where the types `std::uint64_t`, `float` and `Key` must be supported for U.\n     * \n     * The implementation leaves binary compatibilty (endianness, IEEE 754 for floats, ...) of the types it serializes\n     * in the hands of the `Serializer` function object if compatibilty is required.\n     */\n    template<class Serializer>\n    void serialize(Serializer& serializer) const {\n        m_ht.serialize(serializer);\n    }\n\n    /**\n     * Deserialize a previouly serialized set through the `deserializer` parameter.\n     * \n     * The `deserializer` parameter must be a function object that supports the following calls:\n     *  - `template<typename U> U operator()();` where the types `std::uint64_t`, `float` and `Key` must be supported for U.\n     * \n     * If the deserialized hash set type is hash compatible with the serialized set, the deserialization process can be\n     * sped up by setting `hash_compatible` to true. To be hash compatible, the Hash and KeyEqual must behave the same way \n     * than the ones used on the serialized map. The `std::size_t` must also be of the same size as the one on the platform used\n     * to serialize the map, the same apply for `IndexType`. 
If these criteria are not met, the behaviour is undefined with \n     * `hash_compatible` sets to true.\n     *\n     * The behaviour is undefined if the type `Key` of the `ordered_set` is not the same as the\n     * type used during serialization.\n     * \n     * The implementation leaves binary compatibilty (endianness, IEEE 754 for floats, size of int, ...) of the types it \n     * deserializes in the hands of the `Deserializer` function object if compatibilty is required.\n     */\n    template<class Deserializer>\n    static ordered_set deserialize(Deserializer& deserializer, bool hash_compatible = false) {\n        ordered_set set(0);\n        set.m_ht.deserialize(deserializer, hash_compatible);\n\n        return set;\n    }\n    \n    \n    \n    friend bool operator==(const ordered_set& lhs, const ordered_set& rhs) { return lhs.m_ht == rhs.m_ht; }\n    friend bool operator!=(const ordered_set& lhs, const ordered_set& rhs) { return lhs.m_ht != rhs.m_ht; }\n    friend bool operator<(const ordered_set& lhs, const ordered_set& rhs) { return lhs.m_ht < rhs.m_ht; }\n    friend bool operator<=(const ordered_set& lhs, const ordered_set& rhs) { return lhs.m_ht <= rhs.m_ht; }\n    friend bool operator>(const ordered_set& lhs, const ordered_set& rhs) { return lhs.m_ht > rhs.m_ht; }\n    friend bool operator>=(const ordered_set& lhs, const ordered_set& rhs) { return lhs.m_ht >= rhs.m_ht; }\n    \n    friend void swap(ordered_set& lhs, ordered_set& rhs) { lhs.swap(rhs); }\n    \nprivate:\n    ht m_ht;    \n};\n\n} // end namespace tsl\n\n#endif\n"
  },
  {
    "path": "extern/xxHash/.gitattributes",
    "content": "# Set the default behavior\n* text eol=lf\n\n# Explicitly declare source files\n*.c text eol=lf\n*.h text eol=lf\n\n# Denote files that should not be modified.\n*.odt binary\n\n"
  },
  {
    "path": "extern/xxHash/.gitignore",
    "content": "# objects\n*.o\n\n# libraries\nlibxxhash.*\n\n# Executables\nxxh32sum\nxxh64sum\nxxhsum\nxxhsum32\nxxhsum_privateXXH\nxxhsum_inlinedXXH\n\n# Mac OS-X artefacts\n*.dSYM\n.DS_Store\n"
  },
  {
    "path": "extern/xxHash/.travis.yml",
    "content": "language: c\ncompiler: gcc\nscript: make -B test-all\nbefore_install:\n  - sudo apt-get update  -qq\n  - sudo apt-get install -qq gcc-arm-linux-gnueabi\n  - sudo apt-get install -qq clang\n  - sudo apt-get install -qq g++-multilib\n  - sudo apt-get install -qq gcc-multilib\n"
  },
  {
    "path": "extern/xxHash/LICENSE",
    "content": "xxHash Library\nCopyright (c) 2012-2014, Yann Collet\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice, this\n  list of conditions and the following disclaimer in the documentation and/or\n  other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "extern/xxHash/Makefile",
    "content": "# ################################################################\n# xxHash Makefile\n# Copyright (C) Yann Collet 2012-2015\n#\n# GPL v2 License\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# You can contact the author at :\n#  - xxHash source repository : http://code.google.com/p/xxhash/\n# ################################################################\n# xxhsum : provides 32/64 bits hash of one or multiple files, or stdin\n# ################################################################\n\n# Version numbers\nLIBVER_MAJOR_SCRIPT:=`sed -n '/define XXH_VERSION_MAJOR/s/.*[[:blank:]]\\([0-9][0-9]*\\).*/\\1/p' < xxhash.h`\nLIBVER_MINOR_SCRIPT:=`sed -n '/define XXH_VERSION_MINOR/s/.*[[:blank:]]\\([0-9][0-9]*\\).*/\\1/p' < xxhash.h`\nLIBVER_PATCH_SCRIPT:=`sed -n '/define XXH_VERSION_RELEASE/s/.*[[:blank:]]\\([0-9][0-9]*\\).*/\\1/p' < xxhash.h`\nLIBVER_MAJOR := $(shell echo $(LIBVER_MAJOR_SCRIPT))\nLIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT))\nLIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT))\nLIBVER := $(LIBVER_MAJOR).$(LIBVER_MINOR).$(LIBVER_PATCH)\n\n# SSE4 detection\nHAVE_SSE4 := $(shell $(CC) -dM -E - < /dev/null | grep \"SSE4\" > /dev/null && echo 1 || echo 0)\nifeq ($(HAVE_SSE4), 1)\nNOSSE4 := -mno-sse4\nelse\nNOSSE4 :=\nendif\n\nCFLAGS ?= -O2 $(NOSSE4)   # disables 
potential auto-vectorization\nCFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \\\n          -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \\\n          -Wstrict-prototypes -Wundef\n\nFLAGS   = $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(MOREFLAGS)\nXXHSUM_VERSION=$(LIBVER)\nMD2ROFF = ronn\nMD2ROFF_FLAGS = --roff --warnings --manual=\"User Commands\" --organization=\"xxhsum $(XXHSUM_VERSION)\"\n\n# Define *.exe as extension for Windows systems\nifneq (,$(filter Windows%,$(OS)))\nEXT =.exe\nelse\nEXT =\nendif\n\n# OS X linker doesn't support -soname, and use different extension\n# see : https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryDesignGuidelines.html\nifeq ($(shell uname), Darwin)\n\tSHARED_EXT = dylib\n\tSHARED_EXT_MAJOR = $(LIBVER_MAJOR).$(SHARED_EXT)\n\tSHARED_EXT_VER = $(LIBVER).$(SHARED_EXT)\n\tSONAME_FLAGS = -install_name $(LIBDIR)/libxxhash.$(SHARED_EXT_MAJOR) -compatibility_version $(LIBVER_MAJOR) -current_version $(LIBVER)\nelse\n\tSONAME_FLAGS = -Wl,-soname=libxxhash.$(SHARED_EXT).$(LIBVER_MAJOR)\n\tSHARED_EXT = so\n\tSHARED_EXT_MAJOR = $(SHARED_EXT).$(LIBVER_MAJOR)\n\tSHARED_EXT_VER = $(SHARED_EXT).$(LIBVER)\nendif\n\nLIBXXH = libxxhash.$(SHARED_EXT_VER)\n\n\n.PHONY: default\ndefault: lib xxhsum_and_links\n\n.PHONY: all\nall: lib xxhsum xxhsum_inlinedXXH\n\nxxhsum32: CFLAGS += -m32\nxxhsum xxhsum32: xxhash.c xxhsum.c\n\t$(CC) $(FLAGS) $^ -o $@$(EXT)\n\n.PHONY: xxhsum_and_links\nxxhsum_and_links: xxhsum\n\tln -sf xxhsum xxh32sum\n\tln -sf xxhsum xxh64sum\n\nxxhsum_inlinedXXH: xxhsum.c\n\t$(CC) $(FLAGS) -DXXH_PRIVATE_API $^ -o $@$(EXT)\n\n\n# library\n\nlibxxhash.a: ARFLAGS = rcs\nlibxxhash.a: xxhash.o\n\t@echo compiling static library\n\t@$(AR) $(ARFLAGS) $@ $^\n\n$(LIBXXH): LDFLAGS += -shared\nifeq (,$(filter Windows%,$(OS)))\n$(LIBXXH): LDFLAGS += -fPIC\nendif\n$(LIBXXH): xxhash.c\n\t@echo compiling dynamic library $(LIBVER)\n\t@$(CC) $(FLAGS) $^ $(LDFLAGS) 
$(SONAME_FLAGS) -o $@\n\t@echo creating versioned links\n\t@ln -sf $@ libxxhash.$(SHARED_EXT_MAJOR)\n\t@ln -sf $@ libxxhash.$(SHARED_EXT)\n\nlibxxhash : $(LIBXXH)\n\nlib: libxxhash.a libxxhash\n\n\n# tests\n\n.PHONY: check\ncheck: xxhsum\n\t# stdin\n\t./xxhsum < xxhash.c\n\t# multiple files\n\t./xxhsum xxhash.* xxhsum.*\n\t# internal bench\n\t./xxhsum -bi1\n\t# file bench\n\t./xxhsum -bi1 xxhash.c\n\n.PHONY: test-mem\ntest-mem: xxhsum\n\t# memory tests\n\tvalgrind --leak-check=yes --error-exitcode=1 ./xxhsum -bi1 xxhash.c\n\tvalgrind --leak-check=yes --error-exitcode=1 ./xxhsum -H0  xxhash.c\n\tvalgrind --leak-check=yes --error-exitcode=1 ./xxhsum -H1  xxhash.c\n\n.PHONY: test32\ntest32: clean xxhsum32\n\t@echo ---- test 32-bit ----\n\t./xxhsum32 -bi1 xxhash.c\n\ntest-xxhsum-c: xxhsum\n\t# xxhsum to/from pipe\n\t./xxhsum lib* | ./xxhsum -c -\n\t./xxhsum -H0 lib* | ./xxhsum -c -\n\t# xxhsum to/from file, shell redirection\n\t./xxhsum lib* > .test.xxh64\n\t./xxhsum -H0 lib* > .test.xxh32\n\t./xxhsum -c .test.xxh64\n\t./xxhsum -c .test.xxh32\n\t./xxhsum -c < .test.xxh64\n\t./xxhsum -c < .test.xxh32\n\t# xxhsum -c warns improperly format lines.\n\tcat .test.xxh64 .test.xxh32 | ./xxhsum -c -\n\tcat .test.xxh32 .test.xxh64 | ./xxhsum -c -\n\t# Expects \"FAILED\"\n\techo \"0000000000000000  LICENSE\" | ./xxhsum -c -; test $$? -eq 1\n\techo \"00000000  LICENSE\" | ./xxhsum -c -; test $$? -eq 1\n\t# Expects \"FAILED open or read\"\n\techo \"0000000000000000  test-expects-file-not-found\" | ./xxhsum -c -; test $$? -eq 1\n\techo \"00000000  test-expects-file-not-found\" | ./xxhsum -c -; test $$? 
-eq 1\n\t@$(RM) -f .test.xxh32 .test.xxh64\n\narmtest: clean\n\t@echo ---- test ARM compilation ----\n\t$(MAKE) xxhsum CC=arm-linux-gnueabi-gcc MOREFLAGS=\"-Werror -static\"\n\nclangtest: clean\n\t@echo ---- test clang compilation ----\n\t$(MAKE) all CC=clang MOREFLAGS=\"-Werror -Wconversion -Wno-sign-conversion\"\n\ngpptest: clean\n\t@echo ---- test g++ compilation ----\n\t$(MAKE) all CC=g++ CFLAGS=\"-O3 -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror\"\n\nc90test: clean\n\t@echo ---- test strict C90 compilation [xxh32 only] ----\n\t$(CC) -std=c90 -Werror -pedantic -DXXH_NO_LONG_LONG -c xxhash.c\n\t$(RM) xxhash.o\n\nusan: CC=clang\nusan: clean\n\t@echo ---- check undefined behavior - sanitize ----\n\t$(MAKE) clean test CC=$(CC) MOREFLAGS=\"-g -fsanitize=undefined -fno-sanitize-recover=all\"\n\nstaticAnalyze: clean\n\t@echo ---- static analyzer - scan-build ----\n\tCFLAGS=\"-g -Werror\" scan-build --status-bugs -v $(MAKE) all\n\nnamespaceTest:\n\t$(CC) -c xxhash.c\n\t$(CC) -DXXH_NAMESPACE=TEST_ -c xxhash.c -o xxhash2.o\n\t$(CC) xxhash.o xxhash2.o xxhsum.c -o xxhsum2  # will fail if one namespace missing (symbol collision)\n\t$(RM) *.o xxhsum2  # clean\n\nxxhsum.1: xxhsum.1.md\n\tcat $^ | $(MD2ROFF) $(MD2ROFF_FLAGS) | sed -n '/^\\.\\\\\\\".*/!p' > $@\n\nman: xxhsum.1\n\nclean-man:\n\t$(RM) xxhsum.1\n\npreview-man: clean-man man\n\tman ./xxhsum.1\n\ntest: all namespaceTest check test-xxhsum-c c90test\n\ntest-all: test test32 armtest clangtest gpptest usan listL120 trailingWhitespace staticAnalyze\n\n.PHONY: listL120\nlistL120:  # extract lines >= 120 characters in *.{c,h}, by Takayuki Matsuoka (note : $$, for Makefile compatibility)\n\tfind . -type f -name '*.c' -o -name '*.h' | while read -r filename; do awk 'length > 120 {print FILENAME \"(\" FNR \"): \" $$0}' $$filename; done\n\n.PHONY: trailingWhitespace\ntrailingWhitespace:\n\t! 
grep -E \"`printf '[ \\\\t]$$'`\" *.1 *.c *.h LICENSE Makefile cmake_unofficial/CMakeLists.txt\n\n.PHONY: clean\nclean:\n\t@$(RM) -r *.dSYM   # Mac OS-X specific\n\t@$(RM) core *.o libxxhash.*\n\t@$(RM) xxhsum$(EXT) xxhsum32$(EXT) xxhsum_inlinedXXH$(EXT) xxh32sum xxh64sum\n\t@echo cleaning completed\n\n\n#-----------------------------------------------------------------------------\n# make install is validated only for the following targets\n#-----------------------------------------------------------------------------\nifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS))\n\n.PHONY: list\nlist:\n\t@$(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ \"^[#.]\") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | xargs\n\nDESTDIR     ?=\n# directory variables : GNU conventions prefer lowercase\n# see https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html\n# support both lower and uppercase (BSD), use uppercase in script\nprefix      ?= /usr/local\nPREFIX      ?= $(prefix)\nexec_prefix ?= $(PREFIX)\nlibdir      ?= $(exec_prefix)/lib\nLIBDIR      ?= $(libdir)\nincludedir  ?= $(PREFIX)/include\nINCLUDEDIR  ?= $(includedir)\nbindir      ?= $(exec_prefix)/bin\nBINDIR      ?= $(bindir)\ndatarootdir ?= $(PREFIX)/share\nmandir      ?= $(datarootdir)/man\nman1dir     ?= $(mandir)/man1\n\nifneq (,$(filter $(shell uname),OpenBSD FreeBSD NetBSD DragonFly SunOS))\nMANDIR  ?= $(PREFIX)/man/man1\nelse\nMANDIR  ?= $(man1dir)\nendif\n\nifneq (,$(filter $(shell uname),SunOS))\nINSTALL ?= ginstall\nelse\nINSTALL ?= install\nendif\n\nINSTALL_PROGRAM ?= $(INSTALL)\nINSTALL_DATA    ?= $(INSTALL) -m 644\n\n\n.PHONY: install\ninstall: lib xxhsum\n\t@echo Installing libxxhash\n\t@$(INSTALL) -d -m 755 $(DESTDIR)$(LIBDIR)\n\t@$(INSTALL_DATA) libxxhash.a $(DESTDIR)$(LIBDIR)\n\t@$(INSTALL_PROGRAM) $(LIBXXH) $(DESTDIR)$(LIBDIR)\n\t@ln -sf 
$(LIBXXH) $(DESTDIR)$(LIBDIR)/libxxhash.$(SHARED_EXT_MAJOR)\n\t@ln -sf $(LIBXXH) $(DESTDIR)$(LIBDIR)/libxxhash.$(SHARED_EXT)\n\t@$(INSTALL) -d -m 755 $(DESTDIR)$(INCLUDEDIR)   # includes\n\t@$(INSTALL_DATA) xxhash.h $(DESTDIR)$(INCLUDEDIR)\n\t@echo Installing xxhsum\n\t@$(INSTALL) -d -m 755 $(DESTDIR)$(BINDIR)/ $(DESTDIR)$(MANDIR)/\n\t@$(INSTALL_PROGRAM) xxhsum $(DESTDIR)$(BINDIR)/xxhsum\n\t@ln -sf xxhsum $(DESTDIR)$(BINDIR)/xxh32sum\n\t@ln -sf xxhsum $(DESTDIR)$(BINDIR)/xxh64sum\n\t@echo Installing man pages\n\t@$(INSTALL_DATA) xxhsum.1 $(DESTDIR)$(MANDIR)/xxhsum.1\n\t@ln -sf xxhsum.1 $(DESTDIR)$(MANDIR)/xxh32sum.1\n\t@ln -sf xxhsum.1 $(DESTDIR)$(MANDIR)/xxh64sum.1\n\t@echo xxhash installation completed\n\n.PHONY: uninstall\nuninstall:\n\t@$(RM) $(DESTDIR)$(LIBDIR)/libxxhash.a\n\t@$(RM) $(DESTDIR)$(LIBDIR)/libxxhash.$(SHARED_EXT)\n\t@$(RM) $(DESTDIR)$(LIBDIR)/libxxhash.$(SHARED_EXT_MAJOR)\n\t@$(RM) $(DESTDIR)$(LIBDIR)/$(LIBXXH)\n\t@$(RM) $(DESTDIR)$(INCLUDEDIR)/xxhash.h\n\t@$(RM) $(DESTDIR)$(BINDIR)/xxh32sum\n\t@$(RM) $(DESTDIR)$(BINDIR)/xxh64sum\n\t@$(RM) $(DESTDIR)$(BINDIR)/xxhsum\n\t@$(RM) $(DESTDIR)$(MANDIR)/xxh32sum.1\n\t@$(RM) $(DESTDIR)$(MANDIR)/xxh64sum.1\n\t@$(RM) $(DESTDIR)$(MANDIR)/xxhsum.1\n\t@echo xxhsum successfully uninstalled\n\nendif\n"
  },
  {
    "path": "extern/xxHash/README.md",
    "content": "xxHash - Extremely fast hash algorithm\n======================================\n\nxxHash is an Extremely fast Hash algorithm, running at RAM speed limits.\nIt successfully completes the [SMHasher](http://code.google.com/p/smhasher/wiki/SMHasher) test suite\nwhich evaluates collision, dispersion and randomness qualities of hash functions.\nCode is highly portable, and hashes are identical on all platforms (little / big endian).\n\n|Branch      |Status   |\n|------------|---------|\n|master      | [![Build Status](https://travis-ci.org/Cyan4973/xxHash.svg?branch=master)](https://travis-ci.org/Cyan4973/xxHash?branch=master) |\n|dev         | [![Build Status](https://travis-ci.org/Cyan4973/xxHash.svg?branch=dev)](https://travis-ci.org/Cyan4973/xxHash?branch=dev) |\n\n\n\nBenchmarks\n-------------------------\n\nThe benchmark uses SMHasher speed test, compiled with Visual 2010 on a Windows Seven 32-bit box.\nThe reference system uses a Core 2 Duo @3GHz\n\n\n| Name          |   Speed  | Quality | Author           |\n|---------------|----------|:-------:|------------------|\n| [xxHash]      | 5.4 GB/s |   10    | Y.C.             
|\n| MurmurHash 3a | 2.7 GB/s |   10    | Austin Appleby   |\n| SBox          | 1.4 GB/s |    9    | Bret Mulvey      |\n| Lookup3       | 1.2 GB/s |    9    | Bob Jenkins      |\n| CityHash64    | 1.05 GB/s|   10    | Pike & Alakuijala|\n| FNV           | 0.55 GB/s|    5    | Fowler, Noll, Vo |\n| CRC32         | 0.43 GB/s|    9    |                  |\n| MD5-32        | 0.33 GB/s|   10    | Ronald L.Rivest  |\n| SHA1-32       | 0.28 GB/s|   10    |                  |\n\n[xxHash]: http://www.xxhash.com\n\nQ.Score is a measure of quality of the hash function.\nIt depends on successfully passing SMHasher test set.\n10 is a perfect score.\nAlgorithms with a score < 5 are not listed on this table.\n\nA more recent version, XXH64, has been created thanks to [Mathias Westerdahl](https://github.com/JCash),\nwhich offers superior speed and dispersion for 64-bit systems.\nNote however that 32-bit applications will still run faster using the 32-bit version.\n\nSMHasher speed test, compiled using GCC 4.8.2, on Linux Mint 64-bit.\nThe reference system uses a Core i5-3340M @2.7GHz\n\n| Version    | Speed on 64-bit | Speed on 32-bit |\n|------------|------------------|------------------|\n| XXH64      | 13.8 GB/s        |  1.9 GB/s        |\n| XXH32      |  6.8 GB/s        |  6.0 GB/s        |\n\nThis project also includes a command line utility, named `xxhsum`, offering similar features as `md5sum`,\nthanks to [Takayuki Matsuoka](https://github.com/t-mat) contributions.\n\n\n### License\n\nThe library files `xxhash.c` and `xxhash.h` are BSD licensed.\nThe utility `xxhsum` is GPL licensed.\n\n\n### Build modifiers\n\nThe following macros can be set at compilation time,\nthey modify xxhash behavior. 
They are all disabled by default.\n\n- `XXH_INLINE_ALL` : Make all functions `inline`, with bodies directly included within `xxhash.h`.\n                     There is no need for an `xxhash.o` module in this case.\n                     Inlining functions is generally beneficial for speed on small keys.\n                     It's especially effective when key length is a compile time constant,\n                     with observed performance improvement in the +200% range .\n                     See [this article](https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html) for details.\n- `XXH_ACCEPT_NULL_INPUT_POINTER` : if set to `1`, when input is a null-pointer,\n                                    xxhash result is the same as a zero-length key\n                                    (instead of a dereference segfault).\n- `XXH_FORCE_MEMORY_ACCESS` : default method `0` uses a portable `memcpy()` notation.\n                              Method `1` uses a gcc-specific `packed` attribute, which can provide better performance for some targets.\n                              Method `2` forces unaligned reads, which is not standard compliant, but might sometimes be the only way to extract better performance.\n- `XXH_CPU_LITTLE_ENDIAN` : by default, endianess is determined at compile time.\n                            It's possible to skip auto-detection and force format to little-endian, by setting this macro to 1.\n                            Setting it to 0 forces big-endian.\n- `XXH_FORCE_NATIVE_FORMAT` : on big-endian systems : use native number representation.\n                              Breaks consistency with little-endian results.\n- `XXH_PRIVATE_API` : same impact as `XXH_INLINE_ALL`.\n                      Name underlines that symbols will not be published on library public interface.\n- `XXH_NAMESPACE` : prefix all symbols with the value of `XXH_NAMESPACE`.\n                    Useful to evade symbol naming collisions,\n         
           in case of multiple inclusions of xxHash source code.\n                    Client applications can still use regular function name,\n                    symbols are automatically translated through `xxhash.h`.\n- `XXH_STATIC_LINKING_ONLY` : gives access to state declaration for static allocation.\n                              Incompatible with dynamic linking, due to risks of ABI changes.\n- `XXH_NO_LONG_LONG` : removes support for XXH64,\n                       for targets without 64-bit support.\n\n\n### Example\n\nCalling xxhash 64-bit variant from a C program :\n\n```\n#include \"xxhash.h\"\n\nunsigned long long calcul_hash(const void* buffer, size_t length)\n{\n    unsigned long long const seed = 0;   /* or any other value */\n    unsigned long long const hash = XXH64(buffer, length, seed);\n    return hash;\n}\n```\n\nUsing streaming variant is more involved, but makes it possible to provide data in multiple rounds :\n```\n#include \"stdlib.h\"   /* abort() */\n#include \"xxhash.h\"\n\n\nunsigned long long calcul_hash_streaming(someCustomType handler)\n{\n    XXH64_state_t* const state = XXH64_createState();\n    if (state==NULL) abort();\n\n    size_t const bufferSize = SOME_VALUE;\n    void* const buffer = malloc(bufferSize);\n    if (buffer==NULL) abort();\n\n    unsigned long long const seed = 0;   /* or any other value */\n    XXH_errorcode const resetResult = XXH64_reset(state, seed);\n    if (resetResult == XXH_ERROR) abort();\n\n    (...)\n    while ( /* any condition */ ) {\n        size_t const length = get_more_data(buffer, bufferSize, handler);   /* undescribed */\n        XXH_errorcode const addResult = XXH64_update(state, buffer, length);\n        if (addResult == XXH_ERROR) abort();\n        (...)\n    }\n\n    (...)\n    unsigned long long const hash = XXH64_digest(state);\n\n    free(buffer);\n    XXH64_freeState(state);\n\n    return hash;\n}\n```\n\n\n### Other programming languages\n\nBeyond the C reference version,\nxxHash is 
also available in many programming languages,\nthanks to great contributors.\nThey are [listed here](http://www.xxhash.com/#other-languages).\n\n\n### Branch Policy\n\n> - The \"master\" branch is considered stable, at all times.\n> - The \"dev\" branch is the one where all contributions must be merged\n    before being promoted to master.\n>   + If you plan to propose a patch, please commit into the \"dev\" branch,\n      or its own feature branch.\n      Direct commits to \"master\" are not permitted.\n"
  },
  {
    "path": "extern/xxHash/appveyor.yml",
    "content": "version: 1.0.{build}\nenvironment:\n  matrix:\n  - COMPILER: \"gcc\"\n    PLATFORM: \"mingw64\"\n  - COMPILER: \"gcc\"\n    PLATFORM: \"mingw32\"\n\ninstall:\n  - ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION%\n  - MKDIR bin\n  - if [%COMPILER%]==[gcc] SET PATH_ORIGINAL=%PATH%\n  - if [%COMPILER%]==[gcc] (\n      SET \"PATH_MINGW32=c:\\MinGW\\bin;c:\\MinGW\\usr\\bin\" &&\n      SET \"PATH_MINGW64=c:\\msys64\\mingw64\\bin;c:\\msys64\\usr\\bin\" &&\n      COPY C:\\MinGW\\bin\\mingw32-make.exe C:\\MinGW\\bin\\make.exe &&\n      COPY C:\\MinGW\\bin\\gcc.exe C:\\MinGW\\bin\\cc.exe\n    ) else (\n      IF [%PLATFORM%]==[x64] (SET ADDITIONALPARAM=/p:LibraryPath=\"C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1\\lib\\x64;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\lib\\amd64;C:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\;C:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\lib\\amd64;\")\n    )\n\nbuild_script:\n  - if [%PLATFORM%]==[mingw32] SET PATH=%PATH_MINGW32%;%PATH_ORIGINAL%\n  - if [%PLATFORM%]==[mingw64] SET PATH=%PATH_MINGW64%;%PATH_ORIGINAL%\n  - if [%PLATFORM%]==[clang] SET PATH=%PATH_MINGW64%;%PATH_ORIGINAL%\n  - ECHO *** &&\n      ECHO Building %COMPILER% %PLATFORM% %CONFIGURATION% &&\n      ECHO ***\n  - if [%PLATFORM%]==[clang] (clang -v)\n  - if [%COMPILER%]==[gcc] (gcc -v)\n  - if [%COMPILER%]==[gcc] (\n      echo ----- &&\n      make -v &&\n      echo ----- &&\n      if not [%PLATFORM%]==[clang] (\n        make -B clean test MOREFLAGS=-Werror\n      ) ELSE (\n        make -B clean test CC=clang MOREFLAGS=\"--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion\"\n      )\n    )\n  - if [%COMPILER%]==[visual] (\n      ECHO *** &&\n      ECHO *** Building Visual Studio 2010 %PLATFORM%\\%CONFIGURATION% &&\n      ECHO *** &&\n      msbuild \"visual\\VS2010\\lz4.sln\" %ADDITIONALPARAM% /m /verbosity:minimal /property:PlatformToolset=v100 /t:Clean,Build /p:Platform=%PLATFORM% 
/p:Configuration=%CONFIGURATION% /p:EnableWholeProgramOptimization=true /logger:\"C:\\Program Files\\AppVeyor\\BuildAgent\\Appveyor.MSBuildLogger.dll\" &&\n      ECHO *** &&\n      ECHO *** Building Visual Studio 2012 %PLATFORM%\\%CONFIGURATION% &&\n      ECHO *** &&\n      msbuild \"visual\\VS2010\\lz4.sln\" /m /verbosity:minimal /property:PlatformToolset=v110 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:\"C:\\Program Files\\AppVeyor\\BuildAgent\\Appveyor.MSBuildLogger.dll\" &&\n      ECHO *** &&\n      ECHO *** Building Visual Studio 2013 %PLATFORM%\\%CONFIGURATION% &&\n      ECHO *** &&\n      msbuild \"visual\\VS2010\\lz4.sln\" /m /verbosity:minimal /property:PlatformToolset=v120 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:\"C:\\Program Files\\AppVeyor\\BuildAgent\\Appveyor.MSBuildLogger.dll\" &&\n      ECHO *** &&\n      ECHO *** Building Visual Studio 2015 %PLATFORM%\\%CONFIGURATION% &&\n      ECHO *** &&\n      msbuild \"visual\\VS2010\\lz4.sln\" /m /verbosity:minimal /property:PlatformToolset=v140 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:\"C:\\Program Files\\AppVeyor\\BuildAgent\\Appveyor.MSBuildLogger.dll\" &&\n      COPY visual\\VS2010\\bin\\%PLATFORM%_%CONFIGURATION%\\*.exe programs\\\n    )\n\ntest_script:\n  - ECHO *** &&\n      ECHO Testing %COMPILER% %PLATFORM% %CONFIGURATION% &&\n      ECHO ***\n  - if not [%COMPILER%]==[unknown] (\n      xxhsum -h &&\n      xxhsum xxhsum.exe &&\n      xxhsum -bi1 &&\n      echo ------- xxhsum tested -------\n    )\n"
  },
  {
    "path": "extern/xxHash/cmake_unofficial/.gitignore",
    "content": "# cmake artifacts\n\nCMakeCache.txt\nCMakeFiles\nMakefile\ncmake_install.cmake\n\n\n# make compilation results\n\nlibxxhash.0.6.3.dylib\nlibxxhash.0.dylib\nlibxxhash.a\nlibxxhash.dylib\n"
  },
  {
    "path": "extern/xxHash/cmake_unofficial/CMakeLists.txt",
    "content": "# To the extent possible under law, the author(s) have dedicated all\n# copyright and related and neighboring rights to this software to\n# the public domain worldwide. This software is distributed without\n# any warranty.\n#\n# For details, see <http://creativecommons.org/publicdomain/zero/1.0/>.\n\nset(XXHASH_DIR \"${CMAKE_CURRENT_SOURCE_DIR}/..\")\n\nfile(STRINGS \"${XXHASH_DIR}/xxhash.h\" XXHASH_VERSION_MAJOR REGEX \"^#define XXH_VERSION_MAJOR +([0-9]+) *$\")\nstring(REGEX REPLACE \"^#define XXH_VERSION_MAJOR +([0-9]+) *$\" \"\\\\1\" XXHASH_VERSION_MAJOR \"${XXHASH_VERSION_MAJOR}\")\nfile(STRINGS \"${XXHASH_DIR}/xxhash.h\" XXHASH_VERSION_MINOR REGEX \"^#define XXH_VERSION_MINOR +([0-9]+) *$\")\nstring(REGEX REPLACE \"^#define XXH_VERSION_MINOR +([0-9]+) *$\" \"\\\\1\" XXHASH_VERSION_MINOR \"${XXHASH_VERSION_MINOR}\")\nfile(STRINGS \"${XXHASH_DIR}/xxhash.h\" XXHASH_VERSION_RELEASE REGEX \"^#define XXH_VERSION_RELEASE +([0-9]+) *$\")\nstring(REGEX REPLACE \"^#define XXH_VERSION_RELEASE +([0-9]+) *$\" \"\\\\1\" XXHASH_VERSION_RELEASE \"${XXHASH_VERSION_RELEASE}\")\nset(XXHASH_VERSION_STRING \"${XXHASH_VERSION_MAJOR}.${XXHASH_VERSION_MINOR}.${XXHASH_VERSION_RELEASE}\")\nset(XXHASH_LIB_VERSION ${XXHASH_VERSION_STRING})\nset(XXHASH_LIB_SOVERSION \"${XXHASH_VERSION_MAJOR}\")\nmark_as_advanced(XXHASH_VERSION_MAJOR XXHASH_VERSION_MINOR XXHASH_VERSION_RELEASE XXHASH_VERSION_STRING XXHASH_LIB_VERSION XXHASH_LIB_SOVERSION)\n\noption(BUILD_XXHSUM \"Build the xxhsum binary\" OFF)\noption(BUILD_SHARED_LIBS \"Build shared library\" OFF)\n\nif(\"${CMAKE_VERSION}\" VERSION_LESS \"3.0\")\n  project(XXHASH C)\nelse()\n  cmake_policy (SET CMP0048 NEW)\n  project(XXHASH\n    VERSION ${XXHASH_VERSION_STRING}\n    LANGUAGES C)\nendif()\n\ncmake_minimum_required (VERSION 3.6)\n\n# If XXHASH is being bundled in another project, we don't want to\n# install anything.  
However, we want to let people override this, so\n# we'll use the XXHASH_BUNDLED_MODE variable to let them do that; just\n# set it to OFF in your project before you add_subdirectory(xxhash/contrib/cmake_unofficial).\nif(CMAKE_CURRENT_SOURCE_DIR STREQUAL \"${CMAKE_SOURCE_DIR}\")\n  # Bundled mode hasn't been set one way or the other, set the default\n  # depending on whether or not we are the top-level project.\n  if(\"${XXHASH_PARENT_DIRECTORY}\" STREQUAL \"\")\n    set(XXHASH_BUNDLED_MODE OFF)\n  else()\n    set(XXHASH_BUNDLED_MODE ON)\n  endif()\nendif()\nmark_as_advanced(XXHASH_BUNDLED_MODE)\n\n# Allow people to choose whether to build shared or static libraries\n# via the BUILD_SHARED_LIBS option unless we are in bundled mode, in\n# which case we always use static libraries.\ninclude(CMakeDependentOption)\nCMAKE_DEPENDENT_OPTION(BUILD_SHARED_LIBS \"Build shared libraries\" ON \"NOT XXHASH_BUNDLED_MODE\" OFF)\n\ninclude_directories(\"${XXHASH_DIR}\")\n\n# libxxhash\nadd_library(xxhash \"${XXHASH_DIR}/xxhash.c\")\nset_target_properties(xxhash PROPERTIES\n  SOVERSION \"${XXHASH_VERSION_STRING}\"\n  VERSION \"${XXHASH_VERSION_STRING}\")\n\n# xxhsum\nadd_executable(xxhsum \"${XXHASH_DIR}/xxhsum.c\")\ntarget_link_libraries(xxhsum xxhash)\n\n# Extra warning flags\ninclude (CheckCCompilerFlag)\nforeach (flag\n    -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow\n    -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement\n    -Wstrict-prototypes -Wundef)\n  # Because https://gcc.gnu.org/wiki/FAQ#wnowarning\n  string(REGEX REPLACE \"\\\\-Wno\\\\-(.+)\" \"-W\\\\1\" flag_to_test \"${flag}\")\n  string(REGEX REPLACE \"[^a-zA-Z0-9]+\" \"_\" test_name \"CFLAG_${flag_to_test}\")\n\n  check_c_compiler_flag(\"${ADD_COMPILER_FLAGS_PREPEND} ${flag_to_test}\" ${test_name})\n\n  if(${test_name})\n    set(CMAKE_C_FLAGS \"${flag} ${CMAKE_C_FLAGS}\")\n  endif()\n\n  unset(test_name)\n  unset(flag_to_test)\nendforeach (flag)\n\nif(NOT XXHASH_BUNDLED_MODE)\n  
include(GNUInstallDirs)\n\n  install(TARGETS xxhsum\n    RUNTIME DESTINATION \"${CMAKE_INSTALL_BINDIR}\")\n  install(TARGETS xxhash\n    LIBRARY DESTINATION \"${CMAKE_INSTALL_LIBDIR}\"\n    ARCHIVE DESTINATION \"${CMAKE_INSTALL_LIBDIR}\")\n  install(FILES \"${XXHASH_DIR}/xxhash.h\"\n    DESTINATION \"${CMAKE_INSTALL_INCLUDEDIR}\")\n  install(FILES \"${XXHASH_DIR}/xxhsum.1\"\n    DESTINATION \"${CMAKE_INSTALL_MANDIR}/man1\")\nendif(NOT XXHASH_BUNDLED_MODE)\n"
  },
  {
    "path": "extern/xxHash/cmake_unofficial/README.md",
    "content": "\n\nThe `cmake` script present in this directory offers the following options :\n\n- `BUILD_XXHSUM` : build the command line binary. ON by default\n- `BUILD_SHARED_LIBS` : build dynamic library. ON by default.\n"
  },
  {
    "path": "extern/xxHash/doc/xxhash_spec.md",
    "content": "xxHash fast digest algorithm\n======================\n\n### Notices\n\nCopyright (c) Yann Collet\n\nPermission is granted to copy and distribute this document\nfor any purpose and without charge,\nincluding translations into other languages\nand incorporation into compilations,\nprovided that the copyright notice and this notice are preserved,\nand that any substantive changes or deletions from the original\nare clearly marked.\nDistribution of this document is unlimited.\n\n### Version\n\n0.1.0 (15/01/18)\n\n\nTable of Contents\n---------------------\n- [Introduction](#introduction)\n- [XXH32 algorithm description](#xxh32-algorithm-description)\n- [XXH64 algorithm description](#xxh64-algorithm-description)\n- [Performance considerations](#performance-considerations)\n- [Reference Implementation](#reference-implementation)\n\n\nIntroduction\n----------------\n\nThis document describes the xxHash digest algorithm, for both 32 and 64 variants, named `XXH32` and `XXH64`. The algorithm takes as input a message of arbitrary length and an optional seed value, it then produces an output of 32 or 64-bit as \"fingerprint\" or \"digest\".\n\nxxHash is primarily designed for speed. It is labelled non-cryptographic, and is not meant to avoid intentional collisions (same digest for 2 different messages), or to prevent producing a message with predefined digest.\n\nXXH32 is designed to be fast on 32-bits machines.\nXXH64 is designed to be fast on 64-bits machines.\nBoth variants produce different output.\nHowever, a given variant shall produce exactly the same output, irrespective of the cpu / os used. In particular, the result remains identical whatever the endianness and width of the cpu.\n\n### Operation notations\n\nAll operations are performed modulo {32,64} bits. Arithmetic overflows are expected.\n`XXH32` uses 32-bit modular operations. 
`XXH64` uses 64-bit modular operations.\n\n- `+` : denote modular addition\n- `*` : denote modular multiplication\n- `X <<< s` : denote the value obtained by circularly shifting (rotating) `X` left by `s` bit positions.  \n- `X >> s` : denote the value obtained by shifting `X` right by s bit positions. Upper `s` bits become `0`.  \n- `X xor Y` : denote the bit-wise XOR of `X` and `Y` (same width).\n\n\nXXH32 Algorithm Description\n-------------------------------------\n\n### Overview\n\nWe begin by supposing that we have a message of any length `L` as input, and that we wish to find its digest. Here `L` is an arbitrary nonnegative integer; `L` may be zero. The following steps are performed to compute the digest of the message.\n\nThe algorithm collect and transform input in _stripes_ of 16 bytes. The transforms are stored inside 4 \"accumulators\", each one storing an unsigned 32-bit value. Each accumulator can be processed independently in parallel, speeding up processing for cpu with multiple execution units.\n\nThe algorithm uses 32-bits addition, multiplication, rotate, shift and xor operations. Many operations require some 32-bits prime number constants, all defined below :\n\n    static const u32 PRIME32_1 = 2654435761U;\n    static const u32 PRIME32_2 = 2246822519U;\n    static const u32 PRIME32_3 = 3266489917U;\n    static const u32 PRIME32_4 =  668265263U;\n    static const u32 PRIME32_5 =  374761393U;\n\n### Step 1. Initialise internal accumulators\n\nEach accumulator gets an initial value based on optional `seed` input. Since the `seed` is optional, it can be `0`.\n\n        u32 acc1 = seed + PRIME32_1 + PRIME32_2;\n        u32 acc2 = seed + PRIME32_2;\n        u32 acc3 = seed + 0;\n        u32 acc4 = seed - PRIME32_1;\n\n#### Special case : input is less than 16 bytes\n\nWhen input is too small (< 16 bytes), the algorithm will not process any stripe. 
Consequently, it will not make use of parallel accumulators.\n\nIn which case, a simplified initialization is performed, using a single accumulator :\n\n      u32 acc  = seed + PRIME32_5;\n\nThe algorithm then proceeds directly to step 4.\n\n### Step 2. Process stripes\n\nA stripe is a contiguous segment of 16 bytes.\nIt is evenly divided into 4 _lanes_, of 4 bytes each.\nThe first lane is used to update accumulator 1, the second lane is used to update accumulator 2, and so on.\n\nEach lane read its associated 32-bit value using __little-endian__ convention.\n\nFor each {lane, accumulator}, the update process is called a _round_, and applies the following formula :\n\n    accN = accN + (laneN * PRIME32_2);\n    accN = accN <<< 13;\n    accN = accN * PRIME32_1;\n\nThis shuffles the bits so that any bit from input _lane_ impacts several bits in output _accumulator_. All operations are performed modulo 2^32.\n\nInput is consumed one full stripe at a time. Step 2 is looped as many times as necessary to consume the whole input, except the last remaining bytes which cannot form a stripe (< 16 bytes).\nWhen that happens, move to step 3.\n\n### Step 3. Accumulator convergence\n\nAll 4 lane accumulators from previous steps are merged to produce a single remaining accumulator of same width (32-bit). The associated formula is as follows :\n\n    acc = (acc1 <<< 1) + (acc2 <<< 7) + (acc3 <<< 12) + (acc4 <<< 18);\n\n### Step 4. Add input length\n\nThe input total length is presumed known at this stage. This step is just about adding the length to accumulator, so that it participates to final mixing.\n\n    acc = acc + (u32)inputLength;\n\nNote that, if input length is so large that it requires more than 32-bits, only the lower 32-bits are added to the accumulator.\n\n### Step 5. 
Consume remaining input\n\nThere may be up to 15 bytes remaining to consume from the input.\nThe final stage will digest them according to following pseudo-code :\n\n    while (remainingLength >= 4) {\n        lane = read_32bit_little_endian(input_ptr);\n        acc = acc + lane * PRIME32_3;\n        acc = (acc <<< 17) * PRIME32_4;\n        input_ptr += 4; remainingLength -= 4;\n    }\n\n    while (remainingLength >= 1) {\n        lane = read_byte(input_ptr);\n        acc = acc + lane * PRIME32_5;\n        acc = (acc <<< 11) * PRIME32_1;\n        input_ptr += 1; remainingLength -= 1;\n    }\n\nThis process ensures that all input bytes are present in the final mix.\n\n### Step 6. Final mix (avalanche)\n\nThe final mix ensures that all input bits have a chance to impact any bit in the output digest, resulting in an unbiased distribution. This is also called avalanche effect.\n\n    acc = acc xor (acc >> 15);\n    acc = acc * PRIME32_2;\n    acc = acc xor (acc >> 13);\n    acc = acc * PRIME32_3;\n    acc = acc xor (acc >> 16);\n\n### Step 7. Output\n\nThe `XXH32()` function produces an unsigned 32-bit value as output.\n\nFor systems which require to store and/or display the result in binary or hexadecimal format, the canonical format is defined to reproduce the same value as the natural decimal format, hence follows __big-endian__ convention (most significant byte first).\n\n\nXXH64 Algorithm Description\n-------------------------------------\n\n### Overview\n\n`XXH64` algorithm structure is very similar to `XXH32` one. The major difference is that `XXH64` uses 64-bit arithmetic, speeding up memory transfer for 64-bit compliant systems, but also relying on cpu capability to efficiently perform 64-bit operations.\n\nThe algorithm collects and transforms input in _stripes_ of 32 bytes. The transforms are stored inside 4 \"accumulators\", each one storing an unsigned 64-bit value. 
Each accumulator can be processed independently in parallel, speeding up processing for cpu with multiple execution units.\n\nThe algorithm uses 64-bit addition, multiplication, rotate, shift and xor operations. Many operations require some 64-bit prime number constants, all defined below :\n\n    static const u64 PRIME64_1 = 11400714785074694791ULL;\n    static const u64 PRIME64_2 = 14029467366897019727ULL;\n    static const u64 PRIME64_3 =  1609587929392839161ULL;\n    static const u64 PRIME64_4 =  9650029242287828579ULL;\n    static const u64 PRIME64_5 =  2870177450012600261ULL;\n\n### Step 1. Initialise internal accumulators\n\nEach accumulator gets an initial value based on optional `seed` input. Since the `seed` is optional, it can be `0`.\n\n        u64 acc1 = seed + PRIME64_1 + PRIME64_2;\n        u64 acc2 = seed + PRIME64_2;\n        u64 acc3 = seed + 0;\n        u64 acc4 = seed - PRIME64_1;\n\n#### Special case : input is less than 32 bytes\n\nWhen input is too small (< 32 bytes), the algorithm will not process any stripe. Consequently, it will not make use of parallel accumulators.\n\nIn which case, a simplified initialization is performed, using a single accumulator :\n\n      u64 acc  = seed + PRIME64_5;\n\nThe algorithm then proceeds directly to step 4.\n\n### Step 2. Process stripes\n\nA stripe is a contiguous segment of 32 bytes.\nIt is evenly divided into 4 _lanes_, of 8 bytes each.\nThe first lane is used to update accumulator 1, the second lane is used to update accumulator 2, and so on.\n\nEach lane read its associated 64-bit value using __little-endian__ convention.\n\nFor each {lane, accumulator}, the update process is called a _round_, and applies the following formula :\n\n    round(accN,laneN):\n    accN = accN + (laneN * PRIME64_2);\n    accN = accN <<< 31;\n    return accN * PRIME64_1;\n\nThis shuffles the bits so that any bit from input _lane_ impacts several bits in output _accumulator_. 
All operations are performed modulo 2^64.\n\nInput is consumed one full stripe at a time. Step 2 is looped as many times as necessary to consume the whole input, except the last remaining bytes which cannot form a stripe (< 32 bytes).\nWhen that happens, move to step 3.\n\n### Step 3. Accumulator convergence\n\nAll 4 lane accumulators from previous steps are merged to produce a single remaining accumulator of same width (64-bit). The associated formula is as follows.\n\nNote that accumulator convergence is more complex than 32-bit variant, and requires to define another function called _mergeAccumulator()_ :\n\n    mergeAccumulator(acc,accN):\n    acc  = acc xor round(0, accN);\n    acc  = acc * PRIME64_1\n    return acc + PRIME64_4;\n\nwhich is then used in the convergence formula :\n\n    acc = (acc1 <<< 1) + (acc2 <<< 7) + (acc3 <<< 12) + (acc4 <<< 18);\n    acc = mergeAccumulator(acc, acc1);\n    acc = mergeAccumulator(acc, acc2);\n    acc = mergeAccumulator(acc, acc3);\n    acc = mergeAccumulator(acc, acc4);\n\n### Step 4. Add input length\n\nThe input total length is presumed known at this stage. This step is just about adding the length to accumulator, so that it participates to final mixing.\n\n    acc = acc + inputLength;\n\n### Step 5. 
Consume remaining input\n\nThere may be up to 31 bytes remaining to consume from the input.\nThe final stage will digest them according to following pseudo-code :\n\n    while (remainingLength >= 8) {\n        lane = read_64bit_little_endian(input_ptr);\n        acc = acc xor round(0, lane);\n        acc = (acc <<< 27) * PRIME64_1;\n        acc = acc + PRIME64_4;\n        input_ptr += 8; remainingLength -= 8;\n    }\n\n    if (remainingLength >= 4) {\n        lane = read_32bit_little_endian(input_ptr);\n        acc = acc xor (lane * PRIME64_1);\n        acc = (acc <<< 23) * PRIME64_2;\n        acc = acc + PRIME64_3;\n        input_ptr += 4; remainingLength -= 4;\n    }\n\n    while (remainingLength >= 1) {\n        lane = read_byte(input_ptr);\n        acc = acc xor (lane * PRIME64_5);\n        acc = (acc <<< 11) * PRIME64_1;\n        input_ptr += 1; remainingLength -= 1;\n    }\n\nThis process ensures that all input bytes are present in the final mix.\n\n### Step 6. Final mix (avalanche)\n\nThe final mix ensures that all input bits have a chance to impact any bit in the output digest, resulting in an unbiased distribution. This is also called avalanche effect.\n\n    acc = acc xor (acc >> 33);\n    acc = acc * PRIME64_2;\n    acc = acc xor (acc >> 29);\n    acc = acc * PRIME64_3;\n    acc = acc xor (acc >> 32);\n\n### Step 7. Output\n\nThe `XXH64()` function produces an unsigned 64-bit value as output.\n\nFor systems which require to store and/or display the result in binary or hexadecimal format, the canonical format is defined to reproduce the same value as the natural decimal format, hence follows __big-endian__ convention (most significant byte first).\n\nPerformance considerations\n----------------------------------\n\nThe xxHash algorithms are simple and compact to implement. They provide a system independent \"fingerprint\" or digest of a message of arbitrary length.\n\nThe algorithm allows input to be streamed and processed in multiple steps. 
In such case, an internal buffer is needed to ensure data is presented to the algorithm in full stripes.\n\nOn 64-bit systems, the 64-bit variant `XXH64` is generally faster to compute, so it is a recommended variant, even when only 32-bit are needed.\n\nOn 32-bit systems though, positions are reversed : `XXH64` performance is reduced, due to its usage of 64-bit arithmetic. `XXH32` becomes a faster variant.\n\n\nReference Implementation\n----------------------------------------\n\nA reference library written in C is available at http://www.xxhash.com .\nThe web page also links to multiple other implementations written in many different languages.\nIt links to the [github project page](https://github.com/Cyan4973/xxHash) where an [issue board](https://github.com/Cyan4973/xxHash/issues) can be used for further public discussions on the topic.\n\n\nVersion changes\n--------------------\nv0.1.0 : initial release\n"
  },
  {
    "path": "extern/xxHash/xxhash.c",
    "content": "/*\n*  xxHash - Fast Hash algorithm\n*  Copyright (C) 2012-2016, Yann Collet\n*\n*  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n*\n*  Redistribution and use in source and binary forms, with or without\n*  modification, are permitted provided that the following conditions are\n*  met:\n*\n*  * Redistributions of source code must retain the above copyright\n*  notice, this list of conditions and the following disclaimer.\n*  * Redistributions in binary form must reproduce the above\n*  copyright notice, this list of conditions and the following disclaimer\n*  in the documentation and/or other materials provided with the\n*  distribution.\n*\n*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n*  \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*\n*  You can contact the author at :\n*  - xxHash homepage: http://www.xxhash.com\n*  - xxHash source repository : https://github.com/Cyan4973/xxHash\n*/\n\n\n/* *************************************\n*  Tuning parameters\n***************************************/\n/*!XXH_FORCE_MEMORY_ACCESS :\n * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.\n * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.\n * The 
below switch allows selecting a different access method for improved performance.\n * Method 0 (default) : use `memcpy()`. Safe and portable.\n * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).\n *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.\n * Method 2 : direct access. This method doesn't depend on compiler but violates the C standard.\n *            It can generate buggy code on targets which do not support unaligned memory accesses.\n *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)\n * See http://stackoverflow.com/a/32095106/646947 for details.\n * Prefer these methods in priority order (0 > 1 > 2)\n */\n#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */\n#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \\\n                        || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \\\n                        || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )\n#    define XXH_FORCE_MEMORY_ACCESS 2\n#  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \\\n  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \\\n                    || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \\\n                    || defined(__ARM_ARCH_7S__) ))\n#    define XXH_FORCE_MEMORY_ACCESS 1\n#  endif\n#endif\n\n/*!XXH_ACCEPT_NULL_INPUT_POINTER :\n * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault.\n * When this macro is enabled, xxHash actively checks input for null pointer.\n * If it is, result for null input pointers is the same as a null-length input.\n */\n#ifndef XXH_ACCEPT_NULL_INPUT_POINTER   /* can be defined externally */\n#  define XXH_ACCEPT_NULL_INPUT_POINTER 0\n#endif\n\n/*!XXH_FORCE_NATIVE_FORMAT :\n * By default, xxHash library provides 
endian-independent Hash values, based on little-endian convention.\n * Results are therefore identical for little-endian and big-endian CPU.\n * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.\n * Should endian-independence be of no importance for your application, you may set the #define below to 1,\n * to improve speed for Big-endian CPU.\n * This option has no impact on Little_Endian CPU.\n */\n#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */\n#  define XXH_FORCE_NATIVE_FORMAT 0\n#endif\n\n/*!XXH_FORCE_ALIGN_CHECK :\n * This is a minor performance trick, only useful with lots of very small keys.\n * It means : check for aligned/unaligned input.\n * The check costs one initial branch per hash;\n * set it to 0 when the input is guaranteed to be aligned,\n * or when alignment doesn't matter for performance.\n */\n#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */\n#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)\n#    define XXH_FORCE_ALIGN_CHECK 0\n#  else\n#    define XXH_FORCE_ALIGN_CHECK 1\n#  endif\n#endif\n\n\n/* *************************************\n*  Includes & Memory related functions\n***************************************/\n/*! Modify the local functions below should you wish to use some other memory routines\n*   for malloc(), free() */\n#include <stdlib.h>\nstatic void* XXH_malloc(size_t s) { return malloc(s); }\nstatic void  XXH_free  (void* p)  { free(p); }\n/*! 
and for memcpy() */\n#include <string.h>\nstatic void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }\n\n#include <assert.h>   /* assert */\n\n#define XXH_STATIC_LINKING_ONLY\n#include \"xxhash.h\"\n\n\n/* *************************************\n*  Compiler Specific Options\n***************************************/\n#ifdef _MSC_VER    /* Visual Studio */\n#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */\n#  define FORCE_INLINE static __forceinline\n#else\n#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n#    ifdef __GNUC__\n#      define FORCE_INLINE static inline __attribute__((always_inline))\n#    else\n#      define FORCE_INLINE static inline\n#    endif\n#  else\n#    define FORCE_INLINE static\n#  endif /* __STDC_VERSION__ */\n#endif\n\n\n/* *************************************\n*  Basic Types\n***************************************/\n#ifndef MEM_MODULE\n# if !defined (__VMS) \\\n  && (defined (__cplusplus) \\\n  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )\n#   include <stdint.h>\n    typedef uint8_t  BYTE;\n    typedef uint16_t U16;\n    typedef uint32_t U32;\n# else\n    typedef unsigned char      BYTE;\n    typedef unsigned short     U16;\n    typedef unsigned int       U32;\n# endif\n#endif\n\n#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))\n\n/* Force direct memory access. 
Only works on CPU which support unaligned memory access in hardware */\nstatic U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }\n\n#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))\n\n/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\ntypedef union { U32 u32; } __attribute__((packed)) unalign;\nstatic U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }\n\n#else\n\n/* portable and safe solution. Generally efficient.\n * see : http://stackoverflow.com/a/32095106/646947\n */\nstatic U32 XXH_read32(const void* memPtr)\n{\n    U32 val;\n    memcpy(&val, memPtr, sizeof(val));\n    return val;\n}\n\n#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */\n\n\n/* ****************************************\n*  Compiler-specific Functions and Macros\n******************************************/\n#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)\n\n/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */\n#if defined(_MSC_VER)\n#  define XXH_rotl32(x,r) _rotl(x,r)\n#  define XXH_rotl64(x,r) _rotl64(x,r)\n#else\n#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))\n#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))\n#endif\n\n#if defined(_MSC_VER)     /* Visual Studio */\n#  define XXH_swap32 _byteswap_ulong\n#elif XXH_GCC_VERSION >= 403\n#  define XXH_swap32 __builtin_bswap32\n#else\nstatic U32 XXH_swap32 (U32 x)\n{\n    return  ((x << 24) & 0xff000000 ) |\n            ((x <<  8) & 0x00ff0000 ) |\n            ((x >>  8) & 0x0000ff00 ) |\n            ((x >> 24) & 0x000000ff );\n}\n#endif\n\n\n/* *************************************\n*  Architecture Macros\n***************************************/\ntypedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;\n\n/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */\n#ifndef 
XXH_CPU_LITTLE_ENDIAN\nstatic int XXH_isLittleEndian(void)\n{\n    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */\n    return one.c[0];\n}\n#   define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()\n#endif\n\n\n/* ***************************\n*  Memory reads\n*****************************/\ntypedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;\n\nFORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)\n{\n    if (align==XXH_unaligned)\n        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));\n    else\n        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);\n}\n\nFORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)\n{\n    return XXH_readLE32_align(ptr, endian, XXH_unaligned);\n}\n\nstatic U32 XXH_readBE32(const void* ptr)\n{\n    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);\n}\n\n\n/* *************************************\n*  Macros\n***************************************/\n#define XXH_STATIC_ASSERT(c)  { enum { XXH_sa = 1/(int)(!!(c)) }; }  /* use after variable declarations */\nXXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }\n\n\n/* *******************************************************************\n*  32-bit hash functions\n*********************************************************************/\nstatic const U32 PRIME32_1 = 2654435761U;\nstatic const U32 PRIME32_2 = 2246822519U;\nstatic const U32 PRIME32_3 = 3266489917U;\nstatic const U32 PRIME32_4 =  668265263U;\nstatic const U32 PRIME32_5 =  374761393U;\n\nstatic U32 XXH32_round(U32 seed, U32 input)\n{\n    seed += input * PRIME32_2;\n    seed  = XXH_rotl32(seed, 13);\n    seed *= PRIME32_1;\n    return seed;\n}\n\n/* mix all bits */\nstatic U32 XXH32_avalanche(U32 h32)\n{\n    h32 ^= h32 >> 15;\n    h32 *= PRIME32_2;\n    h32 ^= h32 >> 13;\n    h32 *= PRIME32_3;\n    
h32 ^= h32 >> 16;\n    return(h32);\n}\n\n#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)\n\nstatic U32\nXXH32_finalize(U32 h32, const void* ptr, size_t len,\n                XXH_endianess endian, XXH_alignment align)\n\n{\n    const BYTE* p = (const BYTE*)ptr;\n#define PROCESS1             \\\n    h32 += (*p) * PRIME32_5; \\\n    p++;                     \\\n    h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;\n\n#define PROCESS4                         \\\n    h32 += XXH_get32bits(p) * PRIME32_3; \\\n    p+=4;                                \\\n    h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;\n\n    switch(len&15)  /* or switch(bEnd - p) */\n    {\n      case 12:      PROCESS4;\n                    /* fallthrough */\n      case 8:       PROCESS4;\n                    /* fallthrough */\n      case 4:       PROCESS4;\n                    return XXH32_avalanche(h32);\n\n      case 13:      PROCESS4;\n                    /* fallthrough */\n      case 9:       PROCESS4;\n                    /* fallthrough */\n      case 5:       PROCESS4;\n                    PROCESS1;\n                    return XXH32_avalanche(h32);\n\n      case 14:      PROCESS4;\n                    /* fallthrough */\n      case 10:      PROCESS4;\n                    /* fallthrough */\n      case 6:       PROCESS4;\n                    PROCESS1;\n                    PROCESS1;\n                    return XXH32_avalanche(h32);\n\n      case 15:      PROCESS4;\n                    /* fallthrough */\n      case 11:      PROCESS4;\n                    /* fallthrough */\n      case 7:       PROCESS4;\n                    /* fallthrough */\n      case 3:       PROCESS1;\n                    /* fallthrough */\n      case 2:       PROCESS1;\n                    /* fallthrough */\n      case 1:       PROCESS1;\n                    /* fallthrough */\n      case 0:       return XXH32_avalanche(h32);\n    }\n    assert(0);\n    return h32;   /* reaching this point is deemed impossible 
*/\n}\n\n\nFORCE_INLINE U32\nXXH32_endian_align(const void* input, size_t len, U32 seed,\n                    XXH_endianess endian, XXH_alignment align)\n{\n    const BYTE* p = (const BYTE*)input;\n    const BYTE* bEnd = p + len;\n    U32 h32;\n\n#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)\n    if (p==NULL) {\n        len=0;\n        bEnd=p=(const BYTE*)(size_t)16;\n    }\n#endif\n\n    if (len>=16) {\n        const BYTE* const limit = bEnd - 15;\n        U32 v1 = seed + PRIME32_1 + PRIME32_2;\n        U32 v2 = seed + PRIME32_2;\n        U32 v3 = seed + 0;\n        U32 v4 = seed - PRIME32_1;\n\n        do {\n            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;\n            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;\n            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;\n            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;\n        } while (p < limit);\n\n        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)\n            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);\n    } else {\n        h32  = seed + PRIME32_5;\n    }\n\n    h32 += (U32)len;\n\n    return XXH32_finalize(h32, p, len&15, endian, align);\n}\n\n\nXXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)\n{\n#if 0\n    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */\n    XXH32_state_t state;\n    XXH32_reset(&state, seed);\n    XXH32_update(&state, input, len);\n    return XXH32_digest(&state);\n#else\n    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;\n\n    if (XXH_FORCE_ALIGN_CHECK) {\n        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */\n            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);\n            else\n                return XXH32_endian_align(input, len, seed, XXH_bigEndian, 
XXH_aligned);\n    }   }\n\n    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);\n    else\n        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);\n#endif\n}\n\n\n\n/*======   Hash streaming   ======*/\n\nXXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)\n{\n    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));\n}\nXXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)\n{\n    XXH_free(statePtr);\n    return XXH_OK;\n}\n\nXXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)\n{\n    memcpy(dstState, srcState, sizeof(*dstState));\n}\n\nXXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)\n{\n    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */\n    memset(&state, 0, sizeof(state));\n    state.v1 = seed + PRIME32_1 + PRIME32_2;\n    state.v2 = seed + PRIME32_2;\n    state.v3 = seed + 0;\n    state.v4 = seed - PRIME32_1;\n    /* do not write into reserved, planned to be removed in a future version */\n    memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));\n    return XXH_OK;\n}\n\n\nFORCE_INLINE\nXXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)\n{\n    const BYTE* p = (const BYTE*)input;\n    const BYTE* const bEnd = p + len;\n\n    if (input==NULL)\n#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)\n        return XXH_OK;\n#else\n        return XXH_ERROR;\n#endif\n\n    state->total_len_32 += (unsigned)len;\n    state->large_len |= (len>=16) | (state->total_len_32>=16);\n\n    if (state->memsize + len < 16)  {   /* fill in tmp buffer */\n        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);\n        state->memsize += (unsigned)len;\n        return XXH_OK;\n    
}\n\n    if (state->memsize) {   /* some data left from previous update */\n        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);\n        {   const U32* p32 = state->mem32;\n            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;\n            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;\n            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;\n            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian));\n        }\n        p += 16-state->memsize;\n        state->memsize = 0;\n    }\n\n    if (p <= bEnd-16) {\n        const BYTE* const limit = bEnd - 16;\n        U32 v1 = state->v1;\n        U32 v2 = state->v2;\n        U32 v3 = state->v3;\n        U32 v4 = state->v4;\n\n        do {\n            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;\n            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;\n            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;\n            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;\n        } while (p<=limit);\n\n        state->v1 = v1;\n        state->v2 = v2;\n        state->v3 = v3;\n        state->v4 = v4;\n    }\n\n    if (p < bEnd) {\n        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));\n        state->memsize = (unsigned)(bEnd-p);\n    }\n\n    return XXH_OK;\n}\n\n\nXXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)\n{\n    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;\n\n    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);\n    else\n        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);\n}\n\n\nFORCE_INLINE U32\nXXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)\n{\n    U32 h32;\n\n    if (state->large_len) {\n        h32 = XXH_rotl32(state->v1, 1)\n            + 
XXH_rotl32(state->v2, 7)\n            + XXH_rotl32(state->v3, 12)\n            + XXH_rotl32(state->v4, 18);\n    } else {\n        h32 = state->v3 /* == seed */ + PRIME32_5;\n    }\n\n    h32 += state->total_len_32;\n\n    return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned);\n}\n\n\nXXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)\n{\n    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;\n\n    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n        return XXH32_digest_endian(state_in, XXH_littleEndian);\n    else\n        return XXH32_digest_endian(state_in, XXH_bigEndian);\n}\n\n\n/*======   Canonical representation   ======*/\n\n/*! Default XXH result types are basic unsigned 32 and 64 bits.\n*   The canonical representation follows human-readable write convention, aka big-endian (large digits first).\n*   These functions allow transformation of hash result into and from its canonical format.\n*   This way, hash values can be written into a file or buffer, remaining comparable across different systems.\n*/\n\nXXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)\n{\n    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));\n    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);\n    memcpy(dst, &hash, sizeof(*dst));\n}\n\nXXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)\n{\n    return XXH_readBE32(src);\n}\n\n\n#ifndef XXH_NO_LONG_LONG\n\n/* *******************************************************************\n*  64-bit hash functions\n*********************************************************************/\n\n/*======   Memory access   ======*/\n\n#ifndef MEM_MODULE\n# define MEM_MODULE\n# if !defined (__VMS) \\\n  && (defined (__cplusplus) \\\n  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )\n#   include <stdint.h>\n    typedef uint64_t U64;\n# else\n    /* if 
compiler doesn't support unsigned long long, replace by another 64-bit type */\n    typedef unsigned long long U64;\n# endif\n#endif\n\n\n#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))\n\n/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */\nstatic U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }\n\n#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))\n\n/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */\n/* currently only defined for gcc and icc */\ntypedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;\nstatic U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }\n\n#else\n\n/* portable and safe solution. Generally efficient.\n * see : http://stackoverflow.com/a/32095106/646947\n */\n\nstatic U64 XXH_read64(const void* memPtr)\n{\n    U64 val;\n    memcpy(&val, memPtr, sizeof(val));\n    return val;\n}\n\n#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */\n\n#if defined(_MSC_VER)     /* Visual Studio */\n#  define XXH_swap64 _byteswap_uint64\n#elif XXH_GCC_VERSION >= 403\n#  define XXH_swap64 __builtin_bswap64\n#else\nstatic U64 XXH_swap64 (U64 x)\n{\n    return  ((x << 56) & 0xff00000000000000ULL) |\n            ((x << 40) & 0x00ff000000000000ULL) |\n            ((x << 24) & 0x0000ff0000000000ULL) |\n            ((x << 8)  & 0x000000ff00000000ULL) |\n            ((x >> 8)  & 0x00000000ff000000ULL) |\n            ((x >> 24) & 0x0000000000ff0000ULL) |\n            ((x >> 40) & 0x000000000000ff00ULL) |\n            ((x >> 56) & 0x00000000000000ffULL);\n}\n#endif\n\nFORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)\n{\n    if (align==XXH_unaligned)\n        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));\n    else\n        return endian==XXH_littleEndian ? 
*(const U64*)ptr : XXH_swap64(*(const U64*)ptr);\n}\n\nFORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)\n{\n    return XXH_readLE64_align(ptr, endian, XXH_unaligned);\n}\n\nstatic U64 XXH_readBE64(const void* ptr)\n{\n    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);\n}\n\n\n/*======   xxh64   ======*/\n\nstatic const U64 PRIME64_1 = 11400714785074694791ULL;\nstatic const U64 PRIME64_2 = 14029467366897019727ULL;\nstatic const U64 PRIME64_3 =  1609587929392839161ULL;\nstatic const U64 PRIME64_4 =  9650029242287828579ULL;\nstatic const U64 PRIME64_5 =  2870177450012600261ULL;\n\nstatic U64 XXH64_round(U64 acc, U64 input)\n{\n    acc += input * PRIME64_2;\n    acc  = XXH_rotl64(acc, 31);\n    acc *= PRIME64_1;\n    return acc;\n}\n\nstatic U64 XXH64_mergeRound(U64 acc, U64 val)\n{\n    val  = XXH64_round(0, val);\n    acc ^= val;\n    acc  = acc * PRIME64_1 + PRIME64_4;\n    return acc;\n}\n\nstatic U64 XXH64_avalanche(U64 h64)\n{\n    h64 ^= h64 >> 33;\n    h64 *= PRIME64_2;\n    h64 ^= h64 >> 29;\n    h64 *= PRIME64_3;\n    h64 ^= h64 >> 32;\n    return h64;\n}\n\n\n#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)\n\nstatic U64\nXXH64_finalize(U64 h64, const void* ptr, size_t len,\n               XXH_endianess endian, XXH_alignment align)\n{\n    const BYTE* p = (const BYTE*)ptr;\n\n#define PROCESS1_64          \\\n    h64 ^= (*p) * PRIME64_5; \\\n    p++;                     \\\n    h64 = XXH_rotl64(h64, 11) * PRIME64_1;\n\n#define PROCESS4_64          \\\n    h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \\\n    p+=4;                    \\\n    h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;\n\n#define PROCESS8_64 {        \\\n    U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \\\n    p+=8;                    \\\n    h64 ^= k1;               \\\n    h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \\\n}\n\n    switch(len&31) {\n      case 24: PROCESS8_64;\n                    /* fallthrough */\n 
     case 16: PROCESS8_64;\n                    /* fallthrough */\n      case  8: PROCESS8_64;\n               return XXH64_avalanche(h64);\n\n      case 28: PROCESS8_64;\n                    /* fallthrough */\n      case 20: PROCESS8_64;\n                    /* fallthrough */\n      case 12: PROCESS8_64;\n                    /* fallthrough */\n      case  4: PROCESS4_64;\n               return XXH64_avalanche(h64);\n\n      case 25: PROCESS8_64;\n                    /* fallthrough */\n      case 17: PROCESS8_64;\n                    /* fallthrough */\n      case  9: PROCESS8_64;\n               PROCESS1_64;\n               return XXH64_avalanche(h64);\n\n      case 29: PROCESS8_64;\n                    /* fallthrough */\n      case 21: PROCESS8_64;\n                    /* fallthrough */\n      case 13: PROCESS8_64;\n                    /* fallthrough */\n      case  5: PROCESS4_64;\n               PROCESS1_64;\n               return XXH64_avalanche(h64);\n\n      case 26: PROCESS8_64;\n                    /* fallthrough */\n      case 18: PROCESS8_64;\n                    /* fallthrough */\n      case 10: PROCESS8_64;\n               PROCESS1_64;\n               PROCESS1_64;\n               return XXH64_avalanche(h64);\n\n      case 30: PROCESS8_64;\n                    /* fallthrough */\n      case 22: PROCESS8_64;\n                    /* fallthrough */\n      case 14: PROCESS8_64;\n                    /* fallthrough */\n      case  6: PROCESS4_64;\n               PROCESS1_64;\n               PROCESS1_64;\n               return XXH64_avalanche(h64);\n\n      case 27: PROCESS8_64;\n                    /* fallthrough */\n      case 19: PROCESS8_64;\n                    /* fallthrough */\n      case 11: PROCESS8_64;\n               PROCESS1_64;\n               PROCESS1_64;\n               PROCESS1_64;\n               return XXH64_avalanche(h64);\n\n      case 31: PROCESS8_64;\n                    /* fallthrough */\n      case 23: PROCESS8_64;\n                    /* 
fallthrough */\n      case 15: PROCESS8_64;\n                    /* fallthrough */\n      case  7: PROCESS4_64;\n                    /* fallthrough */\n      case  3: PROCESS1_64;\n                    /* fallthrough */\n      case  2: PROCESS1_64;\n                    /* fallthrough */\n      case  1: PROCESS1_64;\n                    /* fallthrough */\n      case  0: return XXH64_avalanche(h64);\n    }\n\n    /* impossible to reach */\n    assert(0);\n    return 0;  /* unreachable, but some compilers complain without it */\n}\n\nFORCE_INLINE U64\nXXH64_endian_align(const void* input, size_t len, U64 seed,\n                XXH_endianess endian, XXH_alignment align)\n{\n    const BYTE* p = (const BYTE*)input;\n    const BYTE* bEnd = p + len;\n    U64 h64;\n\n#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)\n    if (p==NULL) {\n        len=0;\n        bEnd=p=(const BYTE*)(size_t)32;\n    }\n#endif\n\n    if (len>=32) {\n        const BYTE* const limit = bEnd - 32;\n        U64 v1 = seed + PRIME64_1 + PRIME64_2;\n        U64 v2 = seed + PRIME64_2;\n        U64 v3 = seed + 0;\n        U64 v4 = seed - PRIME64_1;\n\n        do {\n            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;\n            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;\n            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;\n            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;\n        } while (p<=limit);\n\n        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);\n        h64 = XXH64_mergeRound(h64, v1);\n        h64 = XXH64_mergeRound(h64, v2);\n        h64 = XXH64_mergeRound(h64, v3);\n        h64 = XXH64_mergeRound(h64, v4);\n\n    } else {\n        h64  = seed + PRIME64_5;\n    }\n\n    h64 += (U64) len;\n\n    return XXH64_finalize(h64, p, len, endian, align);\n}\n\n\nXXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)\n{\n#if 0\n    /* Simple version, good for code 
maintenance, but unfortunately slow for small inputs */\n    XXH64_state_t state;\n    XXH64_reset(&state, seed);\n    XXH64_update(&state, input, len);\n    return XXH64_digest(&state);\n#else\n    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;\n\n    if (XXH_FORCE_ALIGN_CHECK) {\n        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */\n            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);\n            else\n                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);\n    }   }\n\n    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);\n    else\n        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);\n#endif\n}\n\n/*======   Hash Streaming   ======*/\n\nXXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)\n{\n    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));\n}\nXXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)\n{\n    XXH_free(statePtr);\n    return XXH_OK;\n}\n\nXXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)\n{\n    memcpy(dstState, srcState, sizeof(*dstState));\n}\n\nXXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)\n{\n    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */\n    memset(&state, 0, sizeof(state));\n    state.v1 = seed + PRIME64_1 + PRIME64_2;\n    state.v2 = seed + PRIME64_2;\n    state.v3 = seed + 0;\n    state.v4 = seed - PRIME64_1;\n     /* do not write into reserved, planned to be removed in a future version */\n    memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));\n    return XXH_OK;\n}\n\nFORCE_INLINE\nXXH_errorcode 
XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)\n{\n    const BYTE* p = (const BYTE*)input;\n    const BYTE* const bEnd = p + len;\n\n    if (input==NULL)\n#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)\n        return XXH_OK;\n#else\n        return XXH_ERROR;\n#endif\n\n    state->total_len += len;\n\n    if (state->memsize + len < 32) {  /* fill in tmp buffer */\n        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);\n        state->memsize += (U32)len;\n        return XXH_OK;\n    }\n\n    if (state->memsize) {   /* tmp buffer is full */\n        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);\n        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));\n        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));\n        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));\n        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));\n        p += 32-state->memsize;\n        state->memsize = 0;\n    }\n\n    if (p+32 <= bEnd) {\n        const BYTE* const limit = bEnd - 32;\n        U64 v1 = state->v1;\n        U64 v2 = state->v2;\n        U64 v3 = state->v3;\n        U64 v4 = state->v4;\n\n        do {\n            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;\n            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;\n            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;\n            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;\n        } while (p<=limit);\n\n        state->v1 = v1;\n        state->v2 = v2;\n        state->v3 = v3;\n        state->v4 = v4;\n    }\n\n    if (p < bEnd) {\n        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));\n        state->memsize = (unsigned)(bEnd-p);\n    }\n\n    return XXH_OK;\n}\n\nXXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t 
len)\n{\n    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;\n\n    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);\n    else\n        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);\n}\n\nFORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)\n{\n    U64 h64;\n\n    if (state->total_len >= 32) {\n        U64 const v1 = state->v1;\n        U64 const v2 = state->v2;\n        U64 const v3 = state->v3;\n        U64 const v4 = state->v4;\n\n        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);\n        h64 = XXH64_mergeRound(h64, v1);\n        h64 = XXH64_mergeRound(h64, v2);\n        h64 = XXH64_mergeRound(h64, v3);\n        h64 = XXH64_mergeRound(h64, v4);\n    } else {\n        h64  = state->v3 /*seed*/ + PRIME64_5;\n    }\n\n    h64 += (U64) state->total_len;\n\n    return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned);\n}\n\nXXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)\n{\n    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;\n\n    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)\n        return XXH64_digest_endian(state_in, XXH_littleEndian);\n    else\n        return XXH64_digest_endian(state_in, XXH_bigEndian);\n}\n\n\n/*====== Canonical representation   ======*/\n\nXXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)\n{\n    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));\n    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);\n    memcpy(dst, &hash, sizeof(*dst));\n}\n\nXXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)\n{\n    return XXH_readBE64(src);\n}\n\n#endif  /* XXH_NO_LONG_LONG */\n"
  },
  {
    "path": "extern/xxHash/xxhash.h",
    "content": "/*\n   xxHash - Extremely Fast Hash algorithm\n   Header File\n   Copyright (C) 2012-2016, Yann Collet.\n\n   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n\n   Redistribution and use in source and binary forms, with or without\n   modification, are permitted provided that the following conditions are\n   met:\n\n       * Redistributions of source code must retain the above copyright\n   notice, this list of conditions and the following disclaimer.\n       * Redistributions in binary form must reproduce the above\n   copyright notice, this list of conditions and the following disclaimer\n   in the documentation and/or other materials provided with the\n   distribution.\n\n   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n   You can contact the author at :\n   - xxHash source repository : https://github.com/Cyan4973/xxHash\n*/\n\n/* Notice extracted from xxHash homepage :\n\nxxHash is an extremely fast Hash algorithm, running at RAM speed limits.\nIt also successfully passes all tests from the SMHasher suite.\n\nComparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)\n\nName            Speed       Q.Score   Author\nxxHash          5.4 GB/s     
10\nCrapWow         3.2 GB/s      2       Andrew\nMumurHash 3a    2.7 GB/s     10       Austin Appleby\nSpookyHash      2.0 GB/s     10       Bob Jenkins\nSBox            1.4 GB/s      9       Bret Mulvey\nLookup3         1.2 GB/s      9       Bob Jenkins\nSuperFastHash   1.2 GB/s      1       Paul Hsieh\nCityHash64      1.05 GB/s    10       Pike & Alakuijala\nFNV             0.55 GB/s     5       Fowler, Noll, Vo\nCRC32           0.43 GB/s     9\nMD5-32          0.33 GB/s    10       Ronald L. Rivest\nSHA1-32         0.28 GB/s    10\n\nQ.Score is a measure of quality of the hash function.\nIt depends on successfully passing SMHasher test set.\n10 is a perfect score.\n\nA 64-bit version, named XXH64, is available since r35.\nIt offers much better speed, but for 64-bit applications only.\nName     Speed on 64 bits    Speed on 32 bits\nXXH64       13.8 GB/s            1.9 GB/s\nXXH32        6.8 GB/s            6.0 GB/s\n*/\n\n#ifndef XXHASH_H_5627135585666179\n#define XXHASH_H_5627135585666179 1\n\n#if defined (__cplusplus)\nextern \"C\" {\n#endif\n\n\n/* ****************************\n*  Definitions\n******************************/\n#include <stddef.h>   /* size_t */\ntypedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;\n\n\n/* ****************************\n *  API modifier\n ******************************/\n/** XXH_INLINE_ALL (and XXH_PRIVATE_API)\n *  This is useful to include xxhash functions in `static` mode\n *  in order to inline them, and remove their symbol from the public list.\n *  Inlining can offer dramatic performance improvement on small keys.\n *  Methodology :\n *     #define XXH_INLINE_ALL\n *     #include \"xxhash.h\"\n * `xxhash.c` is automatically included.\n *  It's not useful to compile and link it as a separate module.\n */\n#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)\n#  ifndef XXH_STATIC_LINKING_ONLY\n#    define XXH_STATIC_LINKING_ONLY\n#  endif\n#  if defined(__GNUC__)\n#    define XXH_PUBLIC_API static __inline 
__attribute__((unused))\n#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)\n#    define XXH_PUBLIC_API static inline\n#  elif defined(_MSC_VER)\n#    define XXH_PUBLIC_API static __inline\n#  else\n     /* this version may generate warnings for unused static functions */\n#    define XXH_PUBLIC_API static\n#  endif\n#else\n#  define XXH_PUBLIC_API   /* do nothing */\n#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */\n\n/*! XXH_NAMESPACE, aka Namespace Emulation :\n *\n * If you want to include _and expose_ xxHash functions from within your own library,\n * but also want to avoid symbol collisions with other libraries which may also include xxHash,\n *\n * you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library\n * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values).\n *\n * Note that no change is required within the calling program as long as it includes `xxhash.h` :\n * regular symbol name will be automatically translated by this header.\n */\n#ifdef XXH_NAMESPACE\n#  define XXH_CAT(A,B) A##B\n#  define XXH_NAME2(A,B) XXH_CAT(A,B)\n#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)\n#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)\n#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)\n#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)\n#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)\n#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)\n#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)\n#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)\n#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)\n#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)\n#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)\n#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)\n#  define XXH64_freeState 
XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)\n#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)\n#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)\n#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)\n#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)\n#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)\n#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)\n#endif\n\n\n/* *************************************\n*  Version\n***************************************/\n#define XXH_VERSION_MAJOR    0\n#define XXH_VERSION_MINOR    6\n#define XXH_VERSION_RELEASE  5\n#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)\nXXH_PUBLIC_API unsigned XXH_versionNumber (void);\n\n\n/*-**********************************************************************\n*  32-bit hash\n************************************************************************/\ntypedef unsigned int XXH32_hash_t;\n\n/*! 
XXH32() :\n    Calculate the 32-bit hash of sequence \"length\" bytes stored at memory address \"input\".\n    The memory between input & input+length must be valid (allocated and read-accessible).\n    \"seed\" can be used to alter the result predictably.\n    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */\nXXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);\n\n/*======   Streaming   ======*/\ntypedef struct XXH32_state_s XXH32_state_t;   /* incomplete type */\nXXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);\nXXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);\nXXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);\n\nXXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, unsigned int seed);\nXXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);\nXXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);\n\n/*\n * Streaming functions generate the xxHash of an input provided in multiple segments.\n * Note that, for small input, they are slower than single-call functions, due to state management.\n * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.\n *\n * XXH state must first be allocated, using XXH*_createState() .\n *\n * Start a new hash by initializing state with a seed, using XXH*_reset().\n *\n * Then, feed the hash state by calling XXH*_update() as many times as necessary.\n * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.\n *\n * Finally, a hash value can be produced anytime, by using XXH*_digest().\n * This function returns the nn-bits hash as an int or long long.\n *\n * It's still possible to continue inserting input into the hash state after a digest,\n * and generate some new hashes later on, by calling again XXH*_digest().\n *\n * When done, free XXH state 
space if it was allocated dynamically.\n */\n\n/*======   Canonical representation   ======*/\n\ntypedef struct { unsigned char digest[4]; } XXH32_canonical_t;\nXXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);\nXXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);\n\n/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.\n * The canonical representation uses human-readable write convention, aka big-endian (large digits first).\n * These functions allow transformation of hash result into and from its canonical format.\n * This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.\n */\n\n\n#ifndef XXH_NO_LONG_LONG\n/*-**********************************************************************\n*  64-bit hash\n************************************************************************/\ntypedef unsigned long long XXH64_hash_t;\n\n/*! XXH64() :\n    Calculate the 64-bit hash of sequence of length \"len\" stored at memory address \"input\".\n    \"seed\" can be used to alter the result predictably.\n    This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark).\n*/\nXXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);\n\n/*======   Streaming   ======*/\ntypedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */\nXXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);\nXXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);\nXXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);\n\nXXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, unsigned long long seed);\nXXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);\nXXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);\n\n/*======   Canonical representation   
======*/\ntypedef struct { unsigned char digest[8]; } XXH64_canonical_t;\nXXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);\nXXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);\n#endif  /* XXH_NO_LONG_LONG */\n\n\n\n#ifdef XXH_STATIC_LINKING_ONLY\n\n/* ================================================================================================\n   This section contains declarations which are not guaranteed to remain stable.\n   They may change in future versions, becoming incompatible with a different version of the library.\n   These declarations should only be used with static linking.\n   Never use them in association with dynamic linking !\n=================================================================================================== */\n\n/* These definitions are only present to allow\n * static allocation of XXH state, on stack or in a struct for example.\n * Never **ever** use members directly. */\n\n#if !defined (__VMS) \\\n  && (defined (__cplusplus) \\\n  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )\n#   include <stdint.h>\n\nstruct XXH32_state_s {\n   uint32_t total_len_32;\n   uint32_t large_len;\n   uint32_t v1;\n   uint32_t v2;\n   uint32_t v3;\n   uint32_t v4;\n   uint32_t mem32[4];\n   uint32_t memsize;\n   uint32_t reserved;   /* never read nor write, might be removed in a future version */\n};   /* typedef'd to XXH32_state_t */\n\nstruct XXH64_state_s {\n   uint64_t total_len;\n   uint64_t v1;\n   uint64_t v2;\n   uint64_t v3;\n   uint64_t v4;\n   uint64_t mem64[4];\n   uint32_t memsize;\n   uint32_t reserved[2];          /* never read nor write, might be removed in a future version */\n};   /* typedef'd to XXH64_state_t */\n\n# else\n\nstruct XXH32_state_s {\n   unsigned total_len_32;\n   unsigned large_len;\n   unsigned v1;\n   unsigned v2;\n   unsigned v3;\n   unsigned v4;\n   unsigned mem32[4];\n   unsigned memsize;\n   unsigned 
reserved;   /* never read nor write, might be removed in a future version */\n};   /* typedef'd to XXH32_state_t */\n\n#   ifndef XXH_NO_LONG_LONG  /* remove 64-bit support */\nstruct XXH64_state_s {\n   unsigned long long total_len;\n   unsigned long long v1;\n   unsigned long long v2;\n   unsigned long long v3;\n   unsigned long long v4;\n   unsigned long long mem64[4];\n   unsigned memsize;\n   unsigned reserved[2];     /* never read nor write, might be removed in a future version */\n};   /* typedef'd to XXH64_state_t */\n#    endif\n\n# endif\n\n\n#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)\n#  include \"xxhash.c\"   /* include xxhash function bodies as `static`, for inlining */\n#endif\n\n#endif /* XXH_STATIC_LINKING_ONLY */\n\n\n#if defined (__cplusplus)\n}\n#endif\n\n#endif /* XXHASH_H_5627135585666179 */\n"
  },
  {
    "path": "extern/xxHash/xxhsum.1",
    "content": ".\n.TH \"XXHSUM\" \"1\" \"September 2017\" \"xxhsum 0.6.3\" \"User Commands\"\n.\n.SH \"NAME\"\n\\fBxxhsum\\fR \\- print or check xxHash non\\-cryptographic checksums\n.\n.SH \"SYNOPSIS\"\n\\fBxxhsum [<OPTION>] \\.\\.\\. [<FILE>] \\.\\.\\.\\fR\n.\n.br\n\\fBxxhsum \\-b [<OPTION>] \\.\\.\\.\\fR\n.\n.P\n\\fBxxh32sum\\fR is equivalent to \\fBxxhsum \\-H0\\fR\n.\n.br\n\\fBxxh64sum\\fR is equivalent to \\fBxxhsum \\-H1\\fR\n.\n.SH \"DESCRIPTION\"\nPrint or check xxHash (32 or 64bit) checksums\\. When \\fIFILE\\fR is \\fB\\-\\fR, read standard input\\.\n.\n.P\n\\fBxxhsum\\fR supports a command line syntax similar but not identical to md5sum(1)\\. Differences are: \\fBxxhsum\\fR doesn\\'t have text/binary mode switch (\\fB\\-b\\fR, \\fB\\-t\\fR); \\fBxxhsum\\fR always treats file as binary file; \\fBxxhsum\\fR has hash bit width switch (\\fB\\-H\\fR);\n.\n.P\nAs xxHash is a fast non\\-cryptographic checksum algorithm, \\fBxxhsum\\fR should not be used for security related purposes\\.\n.\n.P\n\\fBxxhsum \\-b\\fR invokes benchmark mode\\. See \\fIOPTIONS\\fR and \\fIEXAMPLES\\fR for details\\.\n.\n.SH \"OPTIONS\"\n.\n.TP\n\\fB\\-V\\fR, \\fB\\-\\-version\\fR\nDisplay xxhsum version\n.\n.TP\n\\fB\\-H\\fR\\fIHASHTYPE\\fR\nHash selection\\. \\fIHASHTYPE\\fR means \\fB0\\fR=32bits, \\fB1\\fR=64bits\\. Default value is \\fB1\\fR (64bits)\n.\n.TP\n\\fB\\-\\-little\\-endian\\fR\nSet output hexadecimal checksum value as little endian convention\\. 
By default, value is displayed as big endian\\.\n.\n.TP\n\\fB\\-h\\fR, \\fB\\-\\-help\\fR\nDisplay help and exit\n.\n.P\n\\fBThe following four options are useful only when verifying checksums (\\fB\\-c\\fR)\\fR\n.\n.TP\n\\fB\\-c\\fR, \\fB\\-\\-check\\fR\nRead xxHash sums from the \\fIFILE\\fRs and check them\n.\n.TP\n\\fB\\-\\-quiet\\fR\nExit non\\-zero for improperly formatted checksum lines\n.\n.TP\n\\fB\\-\\-strict\\fR\nDon\\'t print OK for each successfully verified file\n.\n.TP\n\\fB\\-\\-status\\fR\nDon\\'t output anything, status code shows success\n.\n.TP\n\\fB\\-w\\fR, \\fB\\-\\-warn\\fR\nWarn about improperly formatted checksum lines\n.\n.P\n\\fBThe following options are useful only benchmark purpose\\fR\n.\n.TP\n\\fB\\-b\\fR\nBenchmark mode\\. See \\fIEXAMPLES\\fR for details\\.\n.\n.TP\n\\fB\\-B\\fR\\fIBLOCKSIZE\\fR\nOnly useful for benchmark mode (\\fB\\-b\\fR)\\. See \\fIEXAMPLES\\fR for details\\. \\fIBLOCKSIZE\\fR specifies benchmark mode\\'s test data block size in bytes\\. Default value is 102400\n.\n.TP\n\\fB\\-i\\fR\\fIITERATIONS\\fR\nOnly useful for benchmark mode (\\fB\\-b\\fR)\\. See \\fIEXAMPLES\\fR for details\\. \\fIITERATIONS\\fR specifies number of iterations in benchmark\\. Single iteration takes at least 2500 milliseconds\\. 
Default value is 3\n.\n.SH \"EXIT STATUS\"\n\\fBxxhsum\\fR exit \\fB0\\fR on success, \\fB1\\fR if at least one file couldn\\'t be read or doesn\\'t have the same checksum as the \\fB\\-c\\fR option\\.\n.\n.SH \"EXAMPLES\"\nOutput xxHash (64bit) checksum values of specific files to standard output\n.\n.IP \"\" 4\n.\n.nf\n\n$ xxhsum \\-H1 foo bar baz\n.\n.fi\n.\n.IP \"\" 0\n.\n.P\nOutput xxHash (32bit and 64bit) checksum values of specific files to standard output, and redirect it to \\fBxyz\\.xxh32\\fR and \\fBqux\\.xxh64\\fR\n.\n.IP \"\" 4\n.\n.nf\n\n$ xxhsum \\-H0 foo bar baz > xyz\\.xxh32\n$ xxhsum \\-H1 foo bar baz > qux\\.xxh64\n.\n.fi\n.\n.IP \"\" 0\n.\n.P\nRead xxHash sums from specific files and check them\n.\n.IP \"\" 4\n.\n.nf\n\n$ xxhsum \\-c xyz\\.xxh32 qux\\.xxh64\n.\n.fi\n.\n.IP \"\" 0\n.\n.P\nBenchmark xxHash algorithm for 16384 bytes data in 10 times\\. \\fBxxhsum\\fR benchmarks xxHash algorithm for 32\\-bit and 64\\-bit and output results to standard output\\. First column means algorithm, second column is source data size in bytes, last column means hash generation speed in mega\\-bytes per seconds\\.\n.\n.IP \"\" 4\n.\n.nf\n\n$ xxhsum \\-b \\-i10 \\-B16384\n.\n.fi\n.\n.IP \"\" 0\n.\n.SH \"BUGS\"\nReport bugs at: https://github\\.com/Cyan4973/xxHash/issues/\n.\n.SH \"AUTHOR\"\nYann Collet\n.\n.SH \"SEE ALSO\"\nmd5sum(1)\n"
  },
  {
    "path": "extern/xxHash/xxhsum.1.md",
    "content": "xxhsum(1) -- print or check xxHash non-cryptographic checksums\n==============================================================\n\nSYNOPSIS\n--------\n\n`xxhsum [<OPTION>] ... [<FILE>] ...`  \n`xxhsum -b [<OPTION>] ...`\n\n`xxh32sum` is equivalent to `xxhsum -H0`  \n`xxh64sum` is equivalent to `xxhsum -H1`\n\n\nDESCRIPTION\n-----------\n\nPrint or check xxHash (32 or 64bit) checksums.  When <FILE> is `-`, read\nstandard input.\n\n`xxhsum` supports a command line syntax similar but not identical to\nmd5sum(1).  Differences are:\n`xxhsum` doesn't have text/binary mode switch (`-b`, `-t`);\n`xxhsum` always treats file as binary file;\n`xxhsum` has hash bit width switch (`-H`);\n\nAs xxHash is a fast non-cryptographic checksum algorithm,\n`xxhsum` should not be used for security related purposes.\n\n`xxhsum -b` invokes benchmark mode. See [OPTIONS](#OPTIONS) and [EXAMPLES](#EXAMPLES) for details.\n\nOPTIONS\n-------\n\n* `-V`, `--version`:\n  Display xxhsum version\n\n* `-H`<HASHTYPE>:\n  Hash selection.  <HASHTYPE> means `0`=32bits, `1`=64bits.\n  Default value is `1` (64bits)\n\n* `--little-endian`:\n  Set output hexadecimal checksum value as little endian convention.\n  By default, value is displayed as big endian.\n\n* `-h`, `--help`:\n  Display help and exit\n\n**The following four options are useful only when verifying checksums (`-c`)**\n\n* `-c`, `--check`:\n  Read xxHash sums from the <FILE>s and check them\n\n* `--quiet`:\n  Exit non-zero for improperly formatted checksum lines\n\n* `--strict`:\n  Don't print OK for each successfully verified file\n\n* `--status`:\n  Don't output anything, status code shows success\n\n* `-w`, `--warn`:\n  Warn about improperly formatted checksum lines\n\n**The following options are useful only benchmark purpose**\n\n* `-b`:\n  Benchmark mode.  See [EXAMPLES](#EXAMPLES) for details.\n\n* `-B`<BLOCKSIZE>:\n  Only useful for benchmark mode (`-b`). 
See [EXAMPLES](#EXAMPLES) for details.\n  <BLOCKSIZE> specifies benchmark mode's test data block size in bytes.\n  Default value is 102400\n\n* `-i`<ITERATIONS>:\n  Only useful for benchmark mode (`-b`). See [EXAMPLES](#EXAMPLES) for details.\n  <ITERATIONS> specifies number of iterations in benchmark. Single iteration\n  takes at least 2500 milliseconds. Default value is 3\n\nEXIT STATUS\n-----------\n\n`xxhsum` exit `0` on success, `1` if at least one file couldn't be read or\ndoesn't have the same checksum as the `-c` option.\n\nEXAMPLES\n--------\n\nOutput xxHash (64bit) checksum values of specific files to standard output\n\n    $ xxhsum -H1 foo bar baz\n\nOutput xxHash (32bit and 64bit) checksum values of specific files to standard\noutput, and redirect it to `xyz.xxh32` and `qux.xxh64`\n\n    $ xxhsum -H0 foo bar baz > xyz.xxh32\n    $ xxhsum -H1 foo bar baz > qux.xxh64\n\nRead xxHash sums from specific files and check them\n\n    $ xxhsum -c xyz.xxh32 qux.xxh64\n\nBenchmark xxHash algorithm for 16384 bytes data in 10 times. `xxhsum`\nbenchmarks xxHash algorithm for 32-bit and 64-bit and output results to\nstandard output.  First column means algorithm, second column is source data\nsize in bytes, last column means hash generation speed in mega-bytes per\nseconds.\n\n    $ xxhsum -b -i10 -B16384\n\nBUGS\n----\n\nReport bugs at: https://github.com/Cyan4973/xxHash/issues/\n\nAUTHOR\n------\n\nYann Collet\n\nSEE ALSO\n--------\n\nmd5sum(1)\n"
  },
  {
    "path": "extern/xxHash/xxhsum.c",
    "content": "/*\n*  xxhsum - Command line interface for xxhash algorithms\n*  Copyright (C) Yann Collet 2012-2016\n*\n*  GPL v2 License\n*\n*  This program is free software; you can redistribute it and/or modify\n*  it under the terms of the GNU General Public License as published by\n*  the Free Software Foundation; either version 2 of the License, or\n*  (at your option) any later version.\n*\n*  This program is distributed in the hope that it will be useful,\n*  but WITHOUT ANY WARRANTY; without even the implied warranty of\n*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n*  GNU General Public License for more details.\n*\n*  You should have received a copy of the GNU General Public License along\n*  with this program; if not, write to the Free Software Foundation, Inc.,\n*  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n*\n*  You can contact the author at :\n*  - xxHash homepage : http://www.xxhash.com\n*  - xxHash source repository : https://github.com/Cyan4973/xxHash\n*/\n\n/* xxhsum :\n * Provides hash value of a file content, or a list of files, or stdin\n * Display convention is Big Endian, for both 32 and 64 bits algorithms\n */\n\n#ifndef XXHASH_C_2097394837\n#define XXHASH_C_2097394837\n\n/* ************************************\n *  Compiler Options\n **************************************/\n/* MS Visual */\n#if defined(_MSC_VER) || defined(_WIN32)\n#  define _CRT_SECURE_NO_WARNINGS   /* removes visual warnings */\n#endif\n\n/* Under Linux at least, pull in the *64 commands */\n#ifndef _LARGEFILE64_SOURCE\n#  define _LARGEFILE64_SOURCE\n#endif\n\n\n/* ************************************\n *  Includes\n **************************************/\n#include <stdlib.h>     /* malloc, calloc, free, exit */\n#include <stdio.h>      /* fprintf, fopen, ftello64, fread, stdin, stdout, _fileno (when present) */\n#include <string.h>     /* strcmp */\n#include <sys/types.h>  /* stat, stat64, _stat64 */\n#include <sys/stat.h>   /* 
stat, stat64, _stat64 */\n#include <time.h>       /* clock_t, clock, CLOCKS_PER_SEC */\n#include <assert.h>     /* assert */\n\n#define XXH_STATIC_LINKING_ONLY   /* *_state_t */\n#include \"xxhash.h\"\n\n\n/* ************************************\n *  OS-Specific Includes\n **************************************/\n#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(_WIN32) || defined(__CYGWIN__)\n#  include <fcntl.h>    /* _O_BINARY */\n#  include <io.h>       /* _setmode, _isatty */\n#  define SET_BINARY_MODE(file) _setmode(_fileno(file), _O_BINARY)\n#  define IS_CONSOLE(stdStream) _isatty(_fileno(stdStream))\n#else\n#  include <unistd.h>   /* isatty, STDIN_FILENO */\n#  define SET_BINARY_MODE(file)\n#  define IS_CONSOLE(stdStream) isatty(STDIN_FILENO)\n#endif\n\n#if !defined(S_ISREG)\n#  define S_ISREG(x) (((x) & S_IFMT) == S_IFREG)\n#endif\n\n\n/* ************************************\n*  Basic Types\n**************************************/\n#ifndef MEM_MODULE\n# define MEM_MODULE\n# if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */\n#   include <stdint.h>\n    typedef uint8_t  BYTE;\n    typedef uint16_t U16;\n    typedef uint32_t U32;\n    typedef  int32_t S32;\n    typedef uint64_t U64;\n#  else\n    typedef unsigned char      BYTE;\n    typedef unsigned short     U16;\n    typedef unsigned int       U32;\n    typedef   signed int       S32;\n    typedef unsigned long long U64;\n#  endif\n#endif\n\nstatic unsigned BMK_isLittleEndian(void)\n{\n    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */\n    return one.c[0];\n}\n\n\n/* *************************************\n *  Constants\n ***************************************/\n#define LIB_VERSION XXH_VERSION_MAJOR.XXH_VERSION_MINOR.XXH_VERSION_RELEASE\n#define QUOTE(str) #str\n#define EXPAND_AND_QUOTE(str) QUOTE(str)\n#define PROGRAM_VERSION EXPAND_AND_QUOTE(LIB_VERSION)\nstatic const int g_nbBits = (int)(sizeof(void*)*8);\nstatic 
const char g_lename[] = \"little endian\";\nstatic const char g_bename[] = \"big endian\";\n#define ENDIAN_NAME (BMK_isLittleEndian() ? g_lename : g_bename)\nstatic const char author[] = \"Yann Collet\";\n#define WELCOME_MESSAGE(exename) \"%s %s (%i-bits %s), by %s \\n\", \\\n                    exename, PROGRAM_VERSION, g_nbBits, ENDIAN_NAME, author\n\n#define KB *( 1<<10)\n#define MB *( 1<<20)\n#define GB *(1U<<30)\n\nstatic size_t XXH_DEFAULT_SAMPLE_SIZE = 100 KB;\n#define NBLOOPS    3                              /* Default number of benchmark iterations */\n#define TIMELOOP_S 1\n#define TIMELOOP  (TIMELOOP_S * CLOCKS_PER_SEC)   /* Minimum timing per iteration */\n#define XXHSUM32_DEFAULT_SEED 0                   /* Default seed for algo_xxh32 */\n#define XXHSUM64_DEFAULT_SEED 0                   /* Default seed for algo_xxh64 */\n\n#define MAX_MEM    (2 GB - 64 MB)\n\nstatic const char stdinName[] = \"-\";\ntypedef enum { algo_xxh32, algo_xxh64 } algoType;\nstatic const algoType g_defaultAlgo = algo_xxh64;    /* required within main() & usage() */\n\n/* <16 hex char> <SPC> <SPC> <filename> <'\\0'>\n * '4096' is typical Linux PATH_MAX configuration. */\n#define DEFAULT_LINE_LENGTH (sizeof(XXH64_hash_t) * 2 + 2 + 4096 + 1)\n\n/* Maximum acceptable line length. */\n#define MAX_LINE_LENGTH (32 KB)\n\n\n/* ************************************\n *  Display macros\n **************************************/\n#define DISPLAY(...)         fprintf(stderr, __VA_ARGS__)\n#define DISPLAYRESULT(...)   fprintf(stdout, __VA_ARGS__)\n#define DISPLAYLEVEL(l, ...) 
do { if (g_displayLevel>=l) DISPLAY(__VA_ARGS__); } while (0)\nstatic int g_displayLevel = 2;\n\n\n/* ************************************\n *  Local variables\n **************************************/\nstatic U32 g_nbIterations = NBLOOPS;\n\n\n/* ************************************\n *  Benchmark Functions\n **************************************/\nstatic clock_t BMK_clockSpan( clock_t start )\n{\n    return clock() - start;   /* works even if overflow; Typical max span ~ 30 mn */\n}\n\n\nstatic size_t BMK_findMaxMem(U64 requiredMem)\n{\n    size_t const step = 64 MB;\n    void* testmem = NULL;\n\n    requiredMem = (((requiredMem >> 26) + 1) << 26);\n    requiredMem += 2*step;\n    if (requiredMem > MAX_MEM) requiredMem = MAX_MEM;\n\n    while (!testmem) {\n        if (requiredMem > step) requiredMem -= step;\n        else requiredMem >>= 1;\n        testmem = malloc ((size_t)requiredMem);\n    }\n    free (testmem);\n\n    /* keep some space available */\n    if (requiredMem > step) requiredMem -= step;\n    else requiredMem >>= 1;\n\n    return (size_t)requiredMem;\n}\n\n\nstatic U64 BMK_GetFileSize(const char* infilename)\n{\n    int r;\n#if defined(_MSC_VER)\n    struct _stat64 statbuf;\n    r = _stat64(infilename, &statbuf);\n#else\n    struct stat statbuf;\n    r = stat(infilename, &statbuf);\n#endif\n    if (r || !S_ISREG(statbuf.st_mode)) return 0;   /* No good... 
*/\n    return (U64)statbuf.st_size;\n}\n\ntypedef U32 (*hashFunction)(const void* buffer, size_t bufferSize, U32 seed);\n\nstatic U32 localXXH32(const void* buffer, size_t bufferSize, U32 seed) { return XXH32(buffer, bufferSize, seed); }\n\nstatic U32 localXXH64(const void* buffer, size_t bufferSize, U32 seed) { return (U32)XXH64(buffer, bufferSize, seed); }\n\nstatic void BMK_benchHash(hashFunction h, const char* hName, const void* buffer, size_t bufferSize)\n{\n    U32 nbh_perIteration = ((300 MB) / (bufferSize+1)) + 1;  /* first loop conservatively aims for 300 MB/s */\n    U32 iterationNb;\n    double fastestH = 100000000.;\n\n    DISPLAYLEVEL(2, \"\\r%70s\\r\", \"\");       /* Clean display line */\n    if (g_nbIterations<1) g_nbIterations=1;\n    for (iterationNb = 1; iterationNb <= g_nbIterations; iterationNb++) {\n        U32 r=0;\n        clock_t cStart;\n\n        DISPLAYLEVEL(2, \"%1i-%-17.17s : %10u ->\\r\", iterationNb, hName, (U32)bufferSize);\n        cStart = clock();\n        while (clock() == cStart);   /* starts clock() at its exact beginning */\n        cStart = clock();\n\n        {   U32 i;\n            for (i=0; i<nbh_perIteration; i++)\n                r += h(buffer, bufferSize, i);\n        }\n        if (r==0) DISPLAYLEVEL(3,\".\\r\");  /* do something with r to avoid compiler \"optimizing\" away hash function */\n        {   double const timeS = ((double)BMK_clockSpan(cStart) / CLOCKS_PER_SEC) / nbh_perIteration;\n            if (timeS < fastestH) fastestH = timeS;\n            DISPLAYLEVEL(2, \"%1i-%-17.17s : %10u -> %8.0f it/s (%7.1f MB/s) \\r\",\n                    iterationNb, hName, (U32)bufferSize,\n                    (double)1 / fastestH,\n                    ((double)bufferSize / (1<<20)) / fastestH );\n        }\n        assert(fastestH > 1./2000000000);  /* avoid U32 overflow */\n        nbh_perIteration = (U32)(1 / fastestH) + 1;  /* adjust nbh_perIteration to last roughtly one second */\n    }\n    DISPLAYLEVEL(1, 
\"%-19.19s : %10u -> %8.0f it/s (%7.1f MB/s) \\n\", hName, (U32)bufferSize,\n        (double)1 / fastestH,\n        ((double)bufferSize / (1<<20)) / fastestH);\n    if (g_displayLevel<1)\n        DISPLAYLEVEL(0, \"%u, \", (U32)((double)1 / fastestH));\n}\n\n\n/* BMK_benchMem():\n * specificTest : 0 == run all tests, 1+ run only specific test\n * buffer : is supposed 8-bytes aligned (if malloc'ed, it should be)\n * the real allocated size of buffer is supposed to be >= (bufferSize+3).\n * @return : 0 on success, 1 if error (invalid mode selected) */\nstatic int BMK_benchMem(const void* buffer, size_t bufferSize, U32 specificTest)\n{\n    assert((((size_t)buffer) & 8) == 0);  /* ensure alignment */\n\n    /* XXH32 bench */\n    if ((specificTest==0) | (specificTest==1))\n        BMK_benchHash(localXXH32, \"XXH32\", buffer, bufferSize);\n\n    /* Bench XXH32 on Unaligned input */\n    if ((specificTest==0) | (specificTest==2))\n        BMK_benchHash(localXXH32, \"XXH32 unaligned\", ((const char*)buffer)+1, bufferSize);\n\n    /* Bench XXH64 */\n    if ((specificTest==0) | (specificTest==3))\n        BMK_benchHash(localXXH64, \"XXH64\", buffer, bufferSize);\n\n    /* Bench XXH64 on Unaligned input */\n    if ((specificTest==0) | (specificTest==4))\n        BMK_benchHash(localXXH64, \"XXH64 unaligned\", ((const char*)buffer)+3, bufferSize);\n\n    if (specificTest > 4) {\n        DISPLAY(\"benchmark mode invalid \\n\");\n        return 1;\n    }\n    return 0;\n}\n\n\nstatic size_t BMK_selectBenchedSize(const char* fileName)\n{   U64 const inFileSize = BMK_GetFileSize(fileName);\n    size_t benchedSize = (size_t) BMK_findMaxMem(inFileSize);\n    if ((U64)benchedSize > inFileSize) benchedSize = (size_t)inFileSize;\n    if (benchedSize < inFileSize) {\n        DISPLAY(\"Not enough memory for '%s' full size; testing %i MB only...\\n\", fileName, (int)(benchedSize>>20));\n    }\n    return benchedSize;\n}\n\n\nstatic int BMK_benchFiles(const char** fileNamesTable, int 
nbFiles, U32 specificTest)\n{\n    int result = 0;\n    int fileIdx;\n\n    for (fileIdx=0; fileIdx<nbFiles; fileIdx++) {\n        const char* const inFileName = fileNamesTable[fileIdx];\n        FILE* const inFile = fopen( inFileName, \"rb\" );\n        size_t const benchedSize = BMK_selectBenchedSize(inFileName);\n        char* const buffer = (char*)calloc(benchedSize+16+3, 1);\n        void* const alignedBuffer = (buffer+15) - (((size_t)(buffer+15)) & 0xF);  /* align on next 16 bytes */\n\n        /* Checks */\n        if ((inFile==NULL) || (inFileName==NULL)) {\n            DISPLAY(\"Pb opening %s\\n\", inFileName);\n            free(buffer);\n            return 11;\n        }\n        if(!buffer) {\n            DISPLAY(\"\\nError: not enough memory!\\n\");\n            fclose(inFile);\n            return 12;\n        }\n\n        /* Fill input buffer */\n        DISPLAYLEVEL(1, \"\\rLoading %s...        \\n\", inFileName);\n        {   size_t const readSize = fread(alignedBuffer, 1, benchedSize, inFile);\n            fclose(inFile);\n            if(readSize != benchedSize) {\n                DISPLAY(\"\\nError: problem reading file '%s' !!    
\\n\", inFileName);\n                free(buffer);\n                return 13;\n        }   }\n\n        /* bench */\n        result |= BMK_benchMem(alignedBuffer, benchedSize, specificTest);\n\n        free(buffer);\n    }\n\n    return result;\n}\n\n\n\nstatic int BMK_benchInternal(size_t keySize, int specificTest)\n{\n    void* const buffer = calloc(keySize+16+3, 1);\n    void* const alignedBuffer = ((char*)buffer+15) - (((size_t)((char*)buffer+15)) & 0xF);  /* align on next 16 bytes */\n    if(!buffer) {\n        DISPLAY(\"\\nError: not enough memory!\\n\");\n        return 12;\n    }\n\n    /* bench */\n    DISPLAYLEVEL(1, \"Sample of \");\n    if (keySize > 10 KB) {\n        DISPLAYLEVEL(1, \"%u KB\", (U32)(keySize >> 10));\n    } else {\n        DISPLAYLEVEL(1, \"%u bytes\", (U32)keySize);\n    }\n    DISPLAYLEVEL(1, \"...        \\n\");\n\n    {   int const result = BMK_benchMem(alignedBuffer, keySize, specificTest);\n        free(buffer);\n        return result;\n    }\n}\n\n\nstatic void BMK_checkResult(U32 r1, U32 r2)\n{\n    static int nbTests = 1;\n    if (r1==r2) {\n        DISPLAYLEVEL(3, \"\\rTest%3i : %08X == %08X   ok   \", nbTests, r1, r2);\n    } else {\n        DISPLAY(\"\\rERROR : Test%3i : %08X <> %08X   !!!!!   \\n\", nbTests, r1, r2);\n        exit(1);\n    }\n    nbTests++;\n}\n\n\nstatic void BMK_checkResult64(U64 r1, U64 r2)\n{\n    static int nbTests = 1;\n    if (r1!=r2) {\n        DISPLAY(\"\\rERROR : Test%3i : 64-bit values non equals   !!!!!   
\\n\", nbTests);\n        DISPLAY(\"\\r %08X%08X != %08X%08X \\n\", (U32)(r1>>32), (U32)r1, (U32)(r2>>32), (U32)r2);\n        exit(1);\n    }\n    nbTests++;\n}\n\n\nstatic void BMK_testSequence64(void* sentence, size_t len, U64 seed, U64 Nresult)\n{\n    XXH64_state_t state;\n    U64 Dresult;\n    size_t pos;\n\n    Dresult = XXH64(sentence, len, seed);\n    BMK_checkResult64(Dresult, Nresult);\n\n    XXH64_reset(&state, seed);\n    XXH64_update(&state, sentence, len);\n    Dresult = XXH64_digest(&state);\n    BMK_checkResult64(Dresult, Nresult);\n\n    XXH64_reset(&state, seed);\n    for (pos=0; pos<len; pos++)\n        XXH64_update(&state, ((char*)sentence)+pos, 1);\n    Dresult = XXH64_digest(&state);\n    BMK_checkResult64(Dresult, Nresult);\n}\n\n\nstatic void BMK_testSequence(const void* sequence, size_t len, U32 seed, U32 Nresult)\n{\n    XXH32_state_t state;\n    U32 Dresult;\n    size_t pos;\n\n    Dresult = XXH32(sequence, len, seed);\n    BMK_checkResult(Dresult, Nresult);\n\n    XXH32_reset(&state, seed);\n    XXH32_update(&state, sequence, len);\n    Dresult = XXH32_digest(&state);\n    BMK_checkResult(Dresult, Nresult);\n\n    XXH32_reset(&state, seed);\n    for (pos=0; pos<len; pos++)\n        XXH32_update(&state, ((const char*)sequence)+pos, 1);\n    Dresult = XXH32_digest(&state);\n    BMK_checkResult(Dresult, Nresult);\n}\n\n\n#define SANITY_BUFFER_SIZE 101\nstatic void BMK_sanityCheck(void)\n{\n    static const U32 prime = 2654435761U;\n    BYTE sanityBuffer[SANITY_BUFFER_SIZE];\n    U32 byteGen = prime;\n\n    int i;\n    for (i=0; i<SANITY_BUFFER_SIZE; i++) {\n        sanityBuffer[i] = (BYTE)(byteGen>>24);\n        byteGen *= byteGen;\n    }\n\n    BMK_testSequence(NULL,          0, 0,     0x02CC5D05);\n    BMK_testSequence(NULL,          0, prime, 0x36B78AE7);\n    BMK_testSequence(sanityBuffer,  1, 0,     0xB85CBEE5);\n    BMK_testSequence(sanityBuffer,  1, prime, 0xD5845D64);\n    BMK_testSequence(sanityBuffer, 14, 0,     0xE5AA0AB4);\n    
BMK_testSequence(sanityBuffer, 14, prime, 0x4481951D);\n    BMK_testSequence(sanityBuffer, SANITY_BUFFER_SIZE, 0,     0x1F1AA412);\n    BMK_testSequence(sanityBuffer, SANITY_BUFFER_SIZE, prime, 0x498EC8E2);\n\n    BMK_testSequence64(NULL        ,  0, 0,     0xEF46DB3751D8E999ULL);\n    BMK_testSequence64(NULL        ,  0, prime, 0xAC75FDA2929B17EFULL);\n    BMK_testSequence64(sanityBuffer,  1, 0,     0x4FCE394CC88952D8ULL);\n    BMK_testSequence64(sanityBuffer,  1, prime, 0x739840CB819FA723ULL);\n    BMK_testSequence64(sanityBuffer, 14, 0,     0xCFFA8DB881BC3A3DULL);\n    BMK_testSequence64(sanityBuffer, 14, prime, 0x5B9611585EFCC9CBULL);\n    BMK_testSequence64(sanityBuffer, SANITY_BUFFER_SIZE, 0,     0x0EAB543384F878ADULL);\n    BMK_testSequence64(sanityBuffer, SANITY_BUFFER_SIZE, prime, 0xCAA65939306F1E21ULL);\n\n    DISPLAYLEVEL(3, \"\\r%70s\\r\", \"\");       /* Clean display line */\n    DISPLAYLEVEL(3, \"Sanity check -- all tests ok\\n\");\n}\n\n\n/* ********************************************************\n*  File Hashing\n**********************************************************/\n\nstatic void BMK_display_LittleEndian(const void* ptr, size_t length)\n{\n    const BYTE* p = (const BYTE*)ptr;\n    size_t idx;\n    for (idx=length-1; idx<length; idx--)    /* intentional underflow to negative to detect end */\n        DISPLAYRESULT(\"%02x\", p[idx]);\n}\n\nstatic void BMK_display_BigEndian(const void* ptr, size_t length)\n{\n    const BYTE* p = (const BYTE*)ptr;\n    size_t idx;\n    for (idx=0; idx<length; idx++)\n        DISPLAYRESULT(\"%02x\", p[idx]);\n}\n\nstatic void BMK_hashStream(void* xxhHashValue, const algoType hashType, FILE* inFile, void* buffer, size_t blockSize)\n{\n    XXH64_state_t state64;\n    XXH32_state_t state32;\n    size_t readSize;\n\n    /* Init */\n    XXH32_reset(&state32, XXHSUM32_DEFAULT_SEED);\n    XXH64_reset(&state64, XXHSUM64_DEFAULT_SEED);\n\n    /* Load file & update hash */\n    readSize = 1;\n    while (readSize) {\n     
   readSize = fread(buffer, 1, blockSize, inFile);\n        switch(hashType)\n        {\n        case algo_xxh32:\n            XXH32_update(&state32, buffer, readSize);\n            break;\n        case algo_xxh64:\n            XXH64_update(&state64, buffer, readSize);\n            break;\n        default:\n            break;\n        }\n    }\n\n    switch(hashType)\n    {\n    case algo_xxh32:\n        {   U32 const h32 = XXH32_digest(&state32);\n            memcpy(xxhHashValue, &h32, sizeof(h32));\n            break;\n        }\n    case algo_xxh64:\n        {   U64 const h64 = XXH64_digest(&state64);\n            memcpy(xxhHashValue, &h64, sizeof(h64));\n            break;\n        }\n    default:\n            break;\n    }\n}\n\n\ntypedef enum { big_endian, little_endian} endianess;\n\nstatic int BMK_hash(const char* fileName,\n                    const algoType hashType,\n                    const endianess displayEndianess)\n{\n    FILE*  inFile;\n    size_t const blockSize = 64 KB;\n    void*  buffer;\n    U32    h32 = 0;\n    U64    h64 = 0;\n\n    /* Check file existence */\n    if (fileName == stdinName) {\n        inFile = stdin;\n        SET_BINARY_MODE(stdin);\n    }\n    else\n        inFile = fopen( fileName, \"rb\" );\n    if (inFile==NULL) {\n        DISPLAY( \"Pb opening %s\\n\", fileName);\n        return 1;\n    }\n\n    /* Memory allocation & restrictions */\n    buffer = malloc(blockSize);\n    if(!buffer) {\n        DISPLAY(\"\\nError: not enough memory!\\n\");\n        fclose(inFile);\n        return 1;\n    }\n\n    /* loading notification */\n    {   const size_t fileNameSize = strlen(fileName);\n        const char* const fileNameEnd = fileName + fileNameSize;\n        const int maxInfoFilenameSize = (int)(fileNameSize > 30 ? 
30 : fileNameSize);\n        int infoFilenameSize = 1;\n        while ((infoFilenameSize < maxInfoFilenameSize)\n            && (fileNameEnd[-1-infoFilenameSize] != '/')\n            && (fileNameEnd[-1-infoFilenameSize] != '\\\\') )\n              infoFilenameSize++;\n        DISPLAY(\"\\rLoading %s...  \\r\", fileNameEnd - infoFilenameSize);\n\n        /* Load file & update hash */\n        switch(hashType)\n        {\n        case algo_xxh32:\n            BMK_hashStream(&h32, algo_xxh32, inFile, buffer, blockSize);\n            break;\n        case algo_xxh64:\n            BMK_hashStream(&h64, algo_xxh64, inFile, buffer, blockSize);\n            break;\n        default:\n            break;\n        }\n\n        fclose(inFile);\n        free(buffer);\n        DISPLAY(\"%s             \\r\", fileNameEnd - infoFilenameSize);  /* erase line */\n    }\n\n    /* display Hash */\n    switch(hashType)\n    {\n    case algo_xxh32:\n        {   XXH32_canonical_t hcbe32;\n            XXH32_canonicalFromHash(&hcbe32, h32);\n            displayEndianess==big_endian ?\n                BMK_display_BigEndian(&hcbe32, sizeof(hcbe32)) : BMK_display_LittleEndian(&hcbe32, sizeof(hcbe32));\n            DISPLAYRESULT(\"  %s\\n\", fileName);\n            break;\n        }\n    case algo_xxh64:\n        {   XXH64_canonical_t hcbe64;\n            XXH64_canonicalFromHash(&hcbe64, h64);\n            displayEndianess==big_endian ?\n                BMK_display_BigEndian(&hcbe64, sizeof(hcbe64)) : BMK_display_LittleEndian(&hcbe64, sizeof(hcbe64));\n            DISPLAYRESULT(\"  %s\\n\", fileName);\n            break;\n        }\n    default:\n            break;\n    }\n\n    return 0;\n}\n\n\nstatic int BMK_hashFiles(const char** fnList, int fnTotal,\n                         algoType hashType, endianess displayEndianess)\n{\n    int fnNb;\n    int result = 0;\n\n    if (fnTotal==0)\n        return BMK_hash(stdinName, hashType, displayEndianess);\n\n    for (fnNb=0; fnNb<fnTotal; fnNb++)\n    
    result += BMK_hash(fnList[fnNb], hashType, displayEndianess);\n    DISPLAY(\"\\r%70s\\r\", \"\");\n    return result;\n}\n\n\ntypedef enum {\n    GetLine_ok,\n    GetLine_eof,\n    GetLine_exceedMaxLineLength,\n    GetLine_outOfMemory,\n} GetLineResult;\n\ntypedef enum {\n    CanonicalFromString_ok,\n    CanonicalFromString_invalidFormat,\n} CanonicalFromStringResult;\n\ntypedef enum {\n    ParseLine_ok,\n    ParseLine_invalidFormat,\n} ParseLineResult;\n\ntypedef enum {\n    LineStatus_hashOk,\n    LineStatus_hashFailed,\n    LineStatus_failedToOpen,\n} LineStatus;\n\ntypedef union {\n    XXH32_canonical_t xxh32;\n    XXH64_canonical_t xxh64;\n} Canonical;\n\ntypedef struct {\n    Canonical   canonical;\n    const char* filename;\n    int         xxhBits;    /* canonical type : 32:xxh32, 64:xxh64 */\n} ParsedLine;\n\ntypedef struct {\n    unsigned long   nProperlyFormattedLines;\n    unsigned long   nImproperlyFormattedLines;\n    unsigned long   nMismatchedChecksums;\n    unsigned long   nOpenOrReadFailures;\n    unsigned long   nMixedFormatLines;\n    int             xxhBits;\n    int             quit;\n} ParseFileReport;\n\ntypedef struct {\n    const char*     inFileName;\n    FILE*           inFile;\n    int             lineMax;\n    char*           lineBuf;\n    size_t          blockSize;\n    char*           blockBuf;\n    int             strictMode;\n    int             statusOnly;\n    int             warn;\n    int             quiet;\n    ParseFileReport report;\n} ParseFileArg;\n\n\n/*  Read line from stream.\n    Returns GetLine_ok, if it reads line successfully.\n    Returns GetLine_eof, if stream reaches EOF.\n    Returns GetLine_exceedMaxLineLength, if line length is longer than MAX_LINE_LENGTH.\n    Returns GetLine_outOfMemory, if line buffer memory allocation failed.\n */\nstatic GetLineResult getLine(char** lineBuf, int* lineMax, FILE* inFile)\n{\n    GetLineResult result = GetLine_ok;\n    int len = 0;\n\n    if ((*lineBuf == NULL) || 
(*lineMax<1)) {\n        free(*lineBuf);  /* in case it's != NULL */\n        *lineMax = 0;\n        *lineBuf = (char*)malloc(DEFAULT_LINE_LENGTH);\n        if(*lineBuf == NULL) return GetLine_outOfMemory;\n        *lineMax = DEFAULT_LINE_LENGTH;\n    }\n\n    for (;;) {\n        const int c = fgetc(inFile);\n        if (c == EOF) {\n            /* If we meet EOF before first character, returns GetLine_eof,\n             * otherwise GetLine_ok.\n             */\n            if (len == 0) result = GetLine_eof;\n            break;\n        }\n\n        /* Make enough space for len+1 (for final NUL) bytes. */\n        if (len+1 >= *lineMax) {\n            char* newLineBuf = NULL;\n            int newBufSize = *lineMax;\n\n            newBufSize += (newBufSize/2) + 1; /* x 1.5 */\n            if (newBufSize > MAX_LINE_LENGTH) newBufSize = MAX_LINE_LENGTH;\n            if (len+1 >= newBufSize) return GetLine_exceedMaxLineLength;\n\n            newLineBuf = (char*) realloc(*lineBuf, newBufSize);\n            if (newLineBuf == NULL) return GetLine_outOfMemory;\n\n            *lineBuf = newLineBuf;\n            *lineMax = newBufSize;\n        }\n\n        if (c == '\\n') break;\n        (*lineBuf)[len++] = (char) c;\n    }\n\n    (*lineBuf)[len] = '\\0';\n    return result;\n}\n\n\n/*  Converts one hexadecimal character to integer.\n *  Returns -1, if given character is not hexadecimal.\n */\nstatic int charToHex(char c)\n{\n    int result = -1;\n    if (c >= '0' && c <= '9') {\n        result = (int) (c - '0');\n    } else if (c >= 'A' && c <= 'F') {\n        result = (int) (c - 'A') + 0x0a;\n    } else if (c >= 'a' && c <= 'f') {\n        result = (int) (c - 'a') + 0x0a;\n    }\n    return result;\n}\n\n\n/*  Converts XXH32 canonical hexadecimal string hashStr to big endian unsigned char array dst.\n *  Returns CANONICAL_FROM_STRING_INVALID_FORMAT, if hashStr is not well formatted.\n *  Returns CANONICAL_FROM_STRING_OK, if hashStr is parsed successfully.\n */\nstatic 
CanonicalFromStringResult canonicalFromString(unsigned char* dst,\n                                                     size_t dstSize,\n                                                     const char* hashStr)\n{\n    size_t i;\n    for (i = 0; i < dstSize; ++i) {\n        int h0, h1;\n\n        h0 = charToHex(hashStr[i*2 + 0]);\n        if (h0 < 0) return CanonicalFromString_invalidFormat;\n\n        h1 = charToHex(hashStr[i*2 + 1]);\n        if (h1 < 0) return CanonicalFromString_invalidFormat;\n\n        dst[i] = (unsigned char) ((h0 << 4) | h1);\n    }\n    return CanonicalFromString_ok;\n}\n\n\n/*  Parse single line of xxHash checksum file.\n *  Returns PARSE_LINE_ERROR_INVALID_FORMAT, if line is not well formatted.\n *  Returns PARSE_LINE_OK if line is parsed successfully.\n *  And members of parseLine will be filled by parsed values.\n *\n *  - line must be ended with '\\0'.\n *  - Since parsedLine.filename will point within given argument `line`,\n *    users must keep `line`s content during they are using parsedLine.\n *\n *  Given xxHash checksum line should have the following format:\n *\n *      <8 or 16 hexadecimal char> <space> <space> <filename...> <'\\0'>\n */\nstatic ParseLineResult parseLine(ParsedLine* parsedLine, const char* line)\n{\n    const char* const firstSpace = strchr(line, ' ');\n    const char* const secondSpace = firstSpace + 1;\n\n    parsedLine->filename = NULL;\n    parsedLine->xxhBits = 0;\n\n    if (firstSpace == NULL || *secondSpace != ' ') return ParseLine_invalidFormat;\n\n    switch (firstSpace - line)\n    {\n    case 8:\n        {   XXH32_canonical_t* xxh32c = &parsedLine->canonical.xxh32;\n            if (canonicalFromString(xxh32c->digest, sizeof(xxh32c->digest), line)\n                != CanonicalFromString_ok) {\n                return ParseLine_invalidFormat;\n            }\n            parsedLine->xxhBits = 32;\n            break;\n        }\n\n    case 16:\n        {   XXH64_canonical_t* xxh64c = 
&parsedLine->canonical.xxh64;\n            if (canonicalFromString(xxh64c->digest, sizeof(xxh64c->digest), line)\n                != CanonicalFromString_ok) {\n                return ParseLine_invalidFormat;\n            }\n            parsedLine->xxhBits = 64;\n            break;\n        }\n\n    default:\n            return ParseLine_invalidFormat;\n            break;\n    }\n\n    parsedLine->filename = secondSpace + 1;\n    return ParseLine_ok;\n}\n\n\n/*!  Parse xxHash checksum file.\n */\nstatic void parseFile1(ParseFileArg* parseFileArg)\n{\n    const char* const inFileName = parseFileArg->inFileName;\n    ParseFileReport* const report = &parseFileArg->report;\n\n    unsigned long lineNumber = 0;\n    memset(report, 0, sizeof(*report));\n\n    while (!report->quit) {\n        FILE* fp = NULL;\n        LineStatus lineStatus = LineStatus_hashFailed;\n        GetLineResult getLineResult;\n        ParsedLine parsedLine;\n        memset(&parsedLine, 0, sizeof(parsedLine));\n\n        lineNumber++;\n        if (lineNumber == 0) {\n            /* This is unlikely happen, but md5sum.c has this\n             * error check. */\n            DISPLAY(\"%s : too many checksum lines\\n\", inFileName);\n            report->quit = 1;\n            break;\n        }\n\n        getLineResult = getLine(&parseFileArg->lineBuf, &parseFileArg->lineMax,\n                                parseFileArg->inFile);\n        if (getLineResult != GetLine_ok) {\n            if (getLineResult == GetLine_eof) break;\n\n            switch (getLineResult)\n            {\n            case GetLine_ok:\n            case GetLine_eof:\n                /* These cases never happen.  See above getLineResult related \"if\"s.\n                   They exist just for make gcc's -Wswitch-enum happy. 
*/\n                break;\n\n            default:\n                DISPLAY(\"%s : %lu: unknown error\\n\", inFileName, lineNumber);\n                break;\n\n            case GetLine_exceedMaxLineLength:\n                DISPLAY(\"%s : %lu: too long line\\n\", inFileName, lineNumber);\n                break;\n\n            case GetLine_outOfMemory:\n                DISPLAY(\"%s : %lu: out of memory\\n\", inFileName, lineNumber);\n                break;\n            }\n            report->quit = 1;\n            break;\n        }\n\n        if (parseLine(&parsedLine, parseFileArg->lineBuf) != ParseLine_ok) {\n            report->nImproperlyFormattedLines++;\n            if (parseFileArg->warn) {\n                DISPLAY(\"%s : %lu: improperly formatted XXHASH checksum line\\n\"\n                    , inFileName, lineNumber);\n            }\n            continue;\n        }\n\n        if (report->xxhBits != 0 && report->xxhBits != parsedLine.xxhBits) {\n            /* Don't accept xxh32/xxh64 mixed file */\n            report->nImproperlyFormattedLines++;\n            report->nMixedFormatLines++;\n            if (parseFileArg->warn) {\n                DISPLAY(\"%s : %lu: improperly formatted XXHASH checksum line (XXH32/64)\\n\"\n                    , inFileName, lineNumber);\n            }\n            continue;\n        }\n\n        report->nProperlyFormattedLines++;\n        if (report->xxhBits == 0) {\n            report->xxhBits = parsedLine.xxhBits;\n        }\n\n        fp = fopen(parsedLine.filename, \"rb\");\n        if (fp == NULL) {\n            lineStatus = LineStatus_failedToOpen;\n        } else {\n            lineStatus = LineStatus_hashFailed;\n            switch (parsedLine.xxhBits)\n            {\n            case 32:\n                {   XXH32_hash_t xxh;\n                    BMK_hashStream(&xxh, algo_xxh32, fp, parseFileArg->blockBuf, parseFileArg->blockSize);\n                    if (xxh == XXH32_hashFromCanonical(&parsedLine.canonical.xxh32)) 
{\n                        lineStatus = LineStatus_hashOk;\n                }   }\n                break;\n\n            case 64:\n                {   XXH64_hash_t xxh;\n                    BMK_hashStream(&xxh, algo_xxh64, fp, parseFileArg->blockBuf, parseFileArg->blockSize);\n                    if (xxh == XXH64_hashFromCanonical(&parsedLine.canonical.xxh64)) {\n                        lineStatus = LineStatus_hashOk;\n                }   }\n                break;\n\n            default:\n                break;\n            }\n            fclose(fp);\n        }\n\n        switch (lineStatus)\n        {\n        default:\n            DISPLAY(\"%s : unknown error\\n\", inFileName);\n            report->quit = 1;\n            break;\n\n        case LineStatus_failedToOpen:\n            report->nOpenOrReadFailures++;\n            if (!parseFileArg->statusOnly) {\n                DISPLAYRESULT(\"%s : %lu: FAILED open or read %s\\n\"\n                    , inFileName, lineNumber, parsedLine.filename);\n            }\n            break;\n\n        case LineStatus_hashOk:\n        case LineStatus_hashFailed:\n            {   int b = 1;\n                if (lineStatus == LineStatus_hashOk) {\n                    /* If --quiet is specified, don't display \"OK\" */\n                    if (parseFileArg->quiet) b = 0;\n                } else {\n                    report->nMismatchedChecksums++;\n                }\n\n                if (b && !parseFileArg->statusOnly) {\n                    DISPLAYRESULT(\"%s: %s\\n\", parsedLine.filename\n                        , lineStatus == LineStatus_hashOk ? 
\"OK\" : \"FAILED\");\n            }   }\n            break;\n        }\n    }   /* while (!report->quit) */\n}\n\n\n/*  Parse xxHash checksum file.\n *  Returns 1, if all procedures were succeeded.\n *  Returns 0, if any procedures was failed.\n *\n *  If strictMode != 0, return error code if any line is invalid.\n *  If statusOnly != 0, don't generate any output.\n *  If warn != 0, print a warning message to stderr.\n *  If quiet != 0, suppress \"OK\" line.\n *\n *  \"All procedures are succeeded\" means:\n *    - Checksum file contains at least one line and less than SIZE_T_MAX lines.\n *    - All files are properly opened and read.\n *    - All hash values match with its content.\n *    - (strict mode) All lines in checksum file are consistent and well formatted.\n *\n */\nstatic int checkFile(const char* inFileName,\n                     const endianess displayEndianess,\n                     U32 strictMode,\n                     U32 statusOnly,\n                     U32 warn,\n                     U32 quiet)\n{\n    int result = 0;\n    FILE* inFile = NULL;\n    ParseFileArg parseFileArgBody;\n    ParseFileArg* const parseFileArg = &parseFileArgBody;\n    ParseFileReport* const report = &parseFileArg->report;\n\n    if (displayEndianess != big_endian) {\n        /* Don't accept little endian */\n        DISPLAY( \"Check file mode doesn't support little endian\\n\" );\n        return 0;\n    }\n\n    /* note : stdinName is special constant pointer.  It is not a string. 
*/\n    if (inFileName == stdinName) {\n        /* note : Since we expect text input for xxhash -c mode,\n         * Don't set binary mode for stdin */\n        inFile = stdin;\n    } else {\n        inFile = fopen( inFileName, \"rt\" );\n    }\n\n    if (inFile == NULL) {\n        DISPLAY( \"Pb opening %s\\n\", inFileName);\n        return 0;\n    }\n\n    parseFileArg->inFileName    = inFileName;\n    parseFileArg->inFile        = inFile;\n    parseFileArg->lineMax       = DEFAULT_LINE_LENGTH;\n    parseFileArg->lineBuf       = (char*) malloc((size_t) parseFileArg->lineMax);\n    parseFileArg->blockSize     = 64 * 1024;\n    parseFileArg->blockBuf      = (char*) malloc(parseFileArg->blockSize);\n    parseFileArg->strictMode    = strictMode;\n    parseFileArg->statusOnly    = statusOnly;\n    parseFileArg->warn          = warn;\n    parseFileArg->quiet         = quiet;\n\n    parseFile1(parseFileArg);\n\n    free(parseFileArg->blockBuf);\n    free(parseFileArg->lineBuf);\n\n    if (inFile != stdin) fclose(inFile);\n\n    /* Show error/warning messages.  
All messages are copied from md5sum.c\n     */\n    if (report->nProperlyFormattedLines == 0) {\n        DISPLAY(\"%s: no properly formatted XXHASH checksum lines found\\n\", inFileName);\n    } else if (!statusOnly) {\n        if (report->nImproperlyFormattedLines) {\n            DISPLAYRESULT(\"%lu lines are improperly formatted\\n\"\n                , report->nImproperlyFormattedLines);\n        }\n        if (report->nOpenOrReadFailures) {\n            DISPLAYRESULT(\"%lu listed files could not be read\\n\"\n                , report->nOpenOrReadFailures);\n        }\n        if (report->nMismatchedChecksums) {\n            DISPLAYRESULT(\"%lu computed checksums did NOT match\\n\"\n                , report->nMismatchedChecksums);\n    }   }\n\n    /* Result (exit) code logic is copied from\n     * gnu coreutils/src/md5sum.c digest_check() */\n    result =   report->nProperlyFormattedLines != 0\n            && report->nMismatchedChecksums == 0\n            && report->nOpenOrReadFailures == 0\n            && (!strictMode || report->nImproperlyFormattedLines == 0)\n            && report->quit == 0;\n    return result;\n}\n\n\nstatic int checkFiles(const char** fnList, int fnTotal,\n                      const endianess displayEndianess,\n                      U32 strictMode,\n                      U32 statusOnly,\n                      U32 warn,\n                      U32 quiet)\n{\n    int ok = 1;\n\n    /* Special case for stdinName \"-\",\n     * note: stdinName is not a string.  It's special pointer. */\n    if (fnTotal==0) {\n        ok &= checkFile(stdinName, displayEndianess, strictMode, statusOnly, warn, quiet);\n    } else {\n        int fnNb;\n        for (fnNb=0; fnNb<fnTotal; fnNb++)\n            ok &= checkFile(fnList[fnNb], displayEndianess, strictMode, statusOnly, warn, quiet);\n    }\n    return ok ? 
0 : 1;\n}\n\n\n/* ********************************************************\n*  Main\n**********************************************************/\n\nstatic int usage(const char* exename)\n{\n    DISPLAY( WELCOME_MESSAGE(exename) );\n    DISPLAY( \"Usage :\\n\");\n    DISPLAY( \"      %s [arg] [filenames]\\n\", exename);\n    DISPLAY( \"When no filename provided, or - provided : use stdin as input\\n\");\n    DISPLAY( \"Arguments :\\n\");\n    DISPLAY( \" -H# : hash selection : 0=32bits, 1=64bits (default: %i)\\n\", (int)g_defaultAlgo);\n    DISPLAY( \" -c  : read xxHash sums from the [filenames] and check them\\n\");\n    DISPLAY( \" -h  : help \\n\");\n    return 0;\n}\n\n\nstatic int usage_advanced(const char* exename)\n{\n    usage(exename);\n    DISPLAY( \"Advanced :\\n\");\n    DISPLAY( \" --little-endian : hash printed using little endian convention (default: big endian)\\n\");\n    DISPLAY( \" -V, --version   : display version\\n\");\n    DISPLAY( \" -h, --help      : display long help and exit\\n\");\n    DISPLAY( \" -b  : benchmark mode \\n\");\n    DISPLAY( \" -i# : number of iterations (benchmark mode; default %i)\\n\", g_nbIterations);\n    DISPLAY( \"\\n\");\n    DISPLAY( \"The following four options are useful only when verifying checksums (-c):\\n\");\n    DISPLAY( \"--strict : don't print OK for each successfully verified file\\n\");\n    DISPLAY( \"--status : don't output anything, status code shows success\\n\");\n    DISPLAY( \"--quiet  : exit non-zero for improperly formatted checksum lines\\n\");\n    DISPLAY( \"--warn   : warn about improperly formatted checksum lines\\n\");\n    return 0;\n}\n\nstatic int badusage(const char* exename)\n{\n    DISPLAY(\"Wrong parameters\\n\");\n    usage(exename);\n    return 1;\n}\n\n/*! 
readU32FromChar() :\n   @return : unsigned integer value read from input in `char` format,\n             0 is no figure at *stringPtr position.\n    Interprets K, KB, KiB, M, MB and MiB suffix.\n    Modifies `*stringPtr`, advancing it to position where reading stopped.\n    Note : function result can overflow if digit string > MAX_UINT */\nstatic unsigned readU32FromChar(const char** stringPtr)\n{\n    unsigned result = 0;\n    while ((**stringPtr >='0') && (**stringPtr <='9'))\n        result *= 10, result += **stringPtr - '0', (*stringPtr)++ ;\n    if ((**stringPtr=='K') || (**stringPtr=='M')) {\n        result <<= 10;\n        if (**stringPtr=='M') result <<= 10;\n        (*stringPtr)++ ;\n        if (**stringPtr=='i') (*stringPtr)++;\n        if (**stringPtr=='B') (*stringPtr)++;\n    }\n    return result;\n}\n\nint main(int argc, const char** argv)\n{\n    int i, filenamesStart = 0;\n    const char* const exename = argv[0];\n    U32 benchmarkMode = 0;\n    U32 fileCheckMode = 0;\n    U32 strictMode    = 0;\n    U32 statusOnly    = 0;\n    U32 warn          = 0;\n    U32 quiet         = 0;\n    U32 specificTest  = 0;\n    size_t keySize    = XXH_DEFAULT_SAMPLE_SIZE;\n    algoType algo     = g_defaultAlgo;\n    endianess displayEndianess = big_endian;\n\n    /* special case : xxh32sum default to 32 bits checksum */\n    if (strstr(exename, \"xxh32sum\") != NULL) algo = algo_xxh32;\n\n    for(i=1; i<argc; i++) {\n        const char* argument = argv[i];\n\n        if(!argument) continue;   /* Protection, if argument empty */\n\n        if (!strcmp(argument, \"--little-endian\")) { displayEndianess = little_endian; continue; }\n        if (!strcmp(argument, \"--check\")) { fileCheckMode = 1; continue; }\n        if (!strcmp(argument, \"--strict\")) { strictMode = 1; continue; }\n        if (!strcmp(argument, \"--status\")) { statusOnly = 1; continue; }\n        if (!strcmp(argument, \"--quiet\")) { quiet = 1; continue; }\n        if (!strcmp(argument, \"--warn\")) 
{ warn = 1; continue; }\n        if (!strcmp(argument, \"--help\")) { return usage_advanced(exename); }\n        if (!strcmp(argument, \"--version\")) { DISPLAY(WELCOME_MESSAGE(exename)); return 0; }\n\n        if (*argument!='-') {\n            if (filenamesStart==0) filenamesStart=i;   /* only supports a continuous list of filenames */\n            continue;\n        }\n\n        /* command selection */\n        argument++;   /* note : *argument=='-' */\n\n        while (*argument!=0) {\n            switch(*argument)\n            {\n            /* Display version */\n            case 'V':\n                DISPLAY(WELCOME_MESSAGE(exename)); return 0;\n\n            /* Display help on usage */\n            case 'h':\n                return usage_advanced(exename);\n\n            /* select hash algorithm */\n            case 'H':\n                algo = (algoType)(argument[1] - '0');\n                argument+=2;\n                break;\n\n            /* File check mode */\n            case 'c':\n                fileCheckMode=1;\n                argument++;\n                break;\n\n            /* Warning mode (file check mode only, alias of \"--warning\") */\n            case 'w':\n                warn=1;\n                argument++;\n                break;\n\n            /* Trigger benchmark mode */\n            case 'b':\n                argument++;\n                benchmarkMode = 1;\n                specificTest = readU32FromChar(&argument);   /* select one specific test (hidden option) */\n                break;\n\n            /* Modify Nb Iterations (benchmark only) */\n            case 'i':\n                argument++;\n                g_nbIterations = readU32FromChar(&argument);\n                break;\n\n            /* Modify Block size (benchmark only) */\n            case 'B':\n                argument++;\n                keySize = readU32FromChar(&argument);\n                break;\n\n            /* Modify verbosity of benchmark output (hidden option) 
*/\n            case 'q':\n                argument++;\n                g_displayLevel--;\n                break;\n\n            default:\n                return badusage(exename);\n            }\n        }\n    }   /* for(i=1; i<argc; i++) */\n\n    /* Check benchmark mode */\n    if (benchmarkMode) {\n        DISPLAYLEVEL(2, WELCOME_MESSAGE(exename) );\n        BMK_sanityCheck();\n        if (filenamesStart==0) return BMK_benchInternal(keySize, specificTest);\n        return BMK_benchFiles(argv+filenamesStart, argc-filenamesStart, specificTest);\n    }\n\n    /* Check if input is defined as console; trigger an error in this case */\n    if ( (filenamesStart==0) && IS_CONSOLE(stdin) ) return badusage(exename);\n\n    if (filenamesStart==0) filenamesStart = argc;\n    if (fileCheckMode) {\n        return checkFiles(argv+filenamesStart, argc-filenamesStart,\n                          displayEndianess, strictMode, statusOnly, warn, quiet);\n    } else {\n        return BMK_hashFiles(argv+filenamesStart, argc-filenamesStart, algo, displayEndianess);\n    }\n}\n\n#endif /* XXHASH_C_2097394837 */\n"
  },
  {
    "path": "html-export/dist/htmlexportres.qrc",
    "content": "<!DOCTYPE RCC><RCC version=\"1.0\">\n<qresource>\n    <file>index.html</file>\n    <file>main.js</file>\n</qresource>\n</RCC>\n"
  },
  {
    "path": "html-export/dist/index.html",
    "content": "<!DOCTYPE html>\n<html>\n<head>\n  <meta charset=\"utf-8\">\n  <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, shrink-to-fit=no\">\n  <link rel=\"stylesheet\" href=\"https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\" integrity=\"sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T\" crossorigin=\"anonymous\">\n\n  <style>\n    :root {\n      --annotation-font-family: monospace;\n      --annotation-fontsize-normal:\n    }\n\n    body, html {\n      margin-left:5px;\n      margin-right:5px;\n      margin-bottom:5px;\n      margin-top:5px;\n    }\n\n    .axis line,\n    .axis path {\n      stroke: #333;\n      fill: transparent;\n      shape-rendering: crispEdges;\n    }\n\n    .line-graph {\n      stroke: #555;\n      fill: transparent;\n    }\n\n    .unselectable {\n      -webkit-touch-callout: none;\n      -webkit-user-select: none;\n      -khtml-user-select: none;\n      -moz-user-select: none;\n      -ms-user-select: none;\n      user-select: none;\n    } \n\n    .annotation,\n    #annotation_text_char {\n      fill: black;\n      font-size: 10px;\n      font-family: monospace; /* monospace for simple text-width-calculations */\n    }\n\n    .zoomButton {\n      display: block;\n      float: left;\n      margin: 0 3px -3px 0 !important;\n      padding: 0;\n      border: none;\n      width: 3ch;\n      background-color: #f5f5f5;\n      font-family: monospace;\n      font-size: 12px; /* use px, since we render svg in pixels ourselves. 
*/\n      text-decoration: none;\n      font-weight: bold;\n      color: #565656;\n      cursor: pointer;\n    }\n\n    #topError{\n      color: red;\n      visibility: hidden;\n    }\n\n    #cmdListScroll {\n      width: 100%;\n      /* height:50vh; */\n      overflow: scroll;\n      position: relative;\n      /* do not remove */\n    }\n\n    .collapsibleCmd {\n      color: white;\n      cursor: pointer;\n      width: 100%;\n      border: none;\n      text-align: left;\n      outline: none;\n      font-size: 15px;\n    }\n\n    .active,\n    .collapsibleCmd:hover {\n      background-color: #555;\n    }\n\n    .collapsibleCmdContent {\n      padding: 0 18px;\n      /* max-height: 0;\n      overflow: hidden; */\n      transition: max-height 0.2s ease-out;\n      background-color: #f1f1f1;\n    }\n\n    #genericModalBody,\n    .modal-title{\n      white-space: pre-wrap;\n      word-wrap: break-word;\n    }\n\n  </style>\n</head>\n\n<body>\n  <h3>shournal report</h3>\n  <div id=\"topError\"></div>\n  <div id=\"initialSpinner\" class=\"d-flex justify-content-center\">\n    <div class=\"spinner-border\" role=\"status\">\n      <span class=\"sr-only\">Loading...</span>\n    </div>\n  </div>\n\n<!--The next line will be injected when generating from shournal, the\n    sample data is only included here to ease development. \n    DO NOT EDIT. 
-->\n  <script src=\"SAMPLE_DATA.js\"></script>\n  <script>\n    if(typeof commands === 'undefined'){\n      window.commands = JSON.parse(document.getElementById('commandJSON').innerHTML);\n    }  \n  </script>\n\n\n  <!-- Let this stay on top...-->\n  <span id=\"annotation_text_char\" style=\"position: absolute; top: -100px;\">A</span>\n\n  <!-- Generic dialog (for read files) -->\n  <div class=\"modal fade\" id=\"genericModal\" tabindex=\"-1\" role=\"dialog\" aria-labelledby=\"Read file\" aria-hidden=\"true\">\n    <div class=\"modal-dialog mw-100 w-75\" role=\"document\">\n      <div class=\"modal-content\">\n        <div class=\"modal-header\">\n          <h5 class=\"modal-title\" id=\"genericModalTitle\">Read file</h5>\n          <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-label=\"Close\">\n            <span aria-hidden=\"true\">&times;</span>\n          </button>\n        </div>\n        <!-- content added dynamically -->\n        <div class=\"modal-body\" id=\"genericModalBody\">...</div>\n        <div class=\"modal-footer\">\n          <button type=\"button\" class=\"btn btn-secondary\" data-dismiss=\"modal\">Close</button>\n        </div>\n      </div>\n    </div>\n  </div>\n  <script>\n    let scriptLoadError = '';\n    function setScriptLoadError(scriptname){\n      scriptLoadError = 'Failed to load external dependency ' + scriptname +\n        '. 
Please make sure to have a running internet connection.';\n    }\n  </script>\n  <!-- Load d3.js -->\n  <script onerror=\"setScriptLoadError('d3')\" src=\"https://d3js.org/d3.v4.min.js\"></script>\n  <!-- Load bootstrap js and its dependencies -->\n  <!-- jQuery first, then Popper.js, then Bootstrap JS -->\n  <script onerror=\"setScriptLoadError('jq')\" src=\"https://code.jquery.com/jquery-3.3.1.slim.min.js\" integrity=\"sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo\" crossorigin=\"anonymous\"></script>\n  <script onerror=\"setScriptLoadError('popper')\" src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\" integrity=\"sha384-UO2eT0CpHqdSJQ6hJty5KVphtPhzWj9WO1clHTMGa3JDZwrnQq4sF86dIHNDz0W1\" crossorigin=\"anonymous\"></script>\n  <script onerror=\"setScriptLoadError('bootstrap')\" src=\"https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\" integrity=\"sha384-JjSmVgyd0p3pXB1rRibZUAYoIIy6OrQ6VrjIEaFf/nJGzIxFDsf4x0xIM+B07jRM\" crossorigin=\"anonymous\"></script>\n  <!-- To allow for a single generated html file, below script is injected\n     and written to the corresponding cpp-file(!) on build.\n     DO NOT EDIT. -->\n  <script src=\"main.js\"></script>\n  </body>\n\n</html>\n"
  },
  {
    "path": "html-export/dist/main.js",
    "content": "/******/ (function(modules) { // webpackBootstrap\n/******/ \t// The module cache\n/******/ \tvar installedModules = {};\n/******/\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(installedModules[moduleId]) {\n/******/ \t\t\treturn installedModules[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = installedModules[moduleId] = {\n/******/ \t\t\ti: moduleId,\n/******/ \t\t\tl: false,\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/\n/******/ \t\t// Execute the module function\n/******/ \t\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n/******/\n/******/ \t\t// Flag the module as loaded\n/******/ \t\tmodule.l = true;\n/******/\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/\n/******/\n/******/ \t// expose the modules object (__webpack_modules__)\n/******/ \t__webpack_require__.m = modules;\n/******/\n/******/ \t// expose the module cache\n/******/ \t__webpack_require__.c = installedModules;\n/******/\n/******/ \t// define getter function for harmony exports\n/******/ \t__webpack_require__.d = function(exports, name, getter) {\n/******/ \t\tif(!__webpack_require__.o(exports, name)) {\n/******/ \t\t\tObject.defineProperty(exports, name, { enumerable: true, get: getter });\n/******/ \t\t}\n/******/ \t};\n/******/\n/******/ \t// define __esModule on exports\n/******/ \t__webpack_require__.r = function(exports) {\n/******/ \t\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n/******/ \t\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n/******/ \t\t}\n/******/ \t\tObject.defineProperty(exports, '__esModule', { value: true });\n/******/ \t};\n/******/\n/******/ \t// create a fake namespace object\n/******/ \t// mode & 1: value 
is a module id, require it\n/******/ \t// mode & 2: merge all properties of value into the ns\n/******/ \t// mode & 4: return value when already ns object\n/******/ \t// mode & 8|1: behave like require\n/******/ \t__webpack_require__.t = function(value, mode) {\n/******/ \t\tif(mode & 1) value = __webpack_require__(value);\n/******/ \t\tif(mode & 8) return value;\n/******/ \t\tif((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;\n/******/ \t\tvar ns = Object.create(null);\n/******/ \t\t__webpack_require__.r(ns);\n/******/ \t\tObject.defineProperty(ns, 'default', { enumerable: true, value: value });\n/******/ \t\tif(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));\n/******/ \t\treturn ns;\n/******/ \t};\n/******/\n/******/ \t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t__webpack_require__.n = function(module) {\n/******/ \t\tvar getter = module && module.__esModule ?\n/******/ \t\t\tfunction getDefault() { return module['default']; } :\n/******/ \t\t\tfunction getModuleExports() { return module; };\n/******/ \t\t__webpack_require__.d(getter, 'a', getter);\n/******/ \t\treturn getter;\n/******/ \t};\n/******/\n/******/ \t// Object.prototype.hasOwnProperty.call\n/******/ \t__webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };\n/******/\n/******/ \t// __webpack_public_path__\n/******/ \t__webpack_require__.p = \"\";\n/******/\n/******/\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(__webpack_require__.s = 2);\n/******/ })\n/************************************************************************/\n/******/ ([\n/* 0 */\n/***/ (function(module, exports, __webpack_require__) {\n\n\"use strict\";\n\nObject.defineProperty(exports, \"__esModule\", { value: true });\nvar Mutex_1 = 
__webpack_require__(1);\nexports.Mutex = Mutex_1.default;\n\n\n/***/ }),\n/* 1 */\n/***/ (function(module, exports, __webpack_require__) {\n\n\"use strict\";\n\nObject.defineProperty(exports, \"__esModule\", { value: true });\nvar Mutex = /** @class */ (function () {\n    function Mutex() {\n        this._queue = [];\n        this._pending = false;\n    }\n    Mutex.prototype.isLocked = function () {\n        return this._pending;\n    };\n    Mutex.prototype.acquire = function () {\n        var _this = this;\n        var ticket = new Promise(function (resolve) { return _this._queue.push(resolve); });\n        if (!this._pending) {\n            this._dispatchNext();\n        }\n        return ticket;\n    };\n    Mutex.prototype.runExclusive = function (callback) {\n        return this\n            .acquire()\n            .then(function (release) {\n            var result;\n            try {\n                result = callback();\n            }\n            catch (e) {\n                release();\n                throw (e);\n            }\n            return Promise\n                .resolve(result)\n                .then(function (x) { return (release(), x); }, function (e) {\n                release();\n                throw e;\n            });\n        });\n    };\n    Mutex.prototype._dispatchNext = function () {\n        if (this._queue.length > 0) {\n            this._pending = true;\n            this._queue.shift()(this._dispatchNext.bind(this));\n        }\n        else {\n            this._pending = false;\n        }\n    };\n    return Mutex;\n}());\nexports.default = Mutex;\n\n\n/***/ }),\n/* 2 */\n/***/ (function(module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n__webpack_require__.r(__webpack_exports__);\n\n// CONCATENATED MODULE: ./src/generic_text_dialog.js\n\n\nclass GenericTextDialog {\n  constructor() {\n  }\n\n  show(title, content){\n    $(\"#genericModalTitle\").html(title);\n    $(\"#genericModalBody\").html(content);\n    
$(\"#genericModal\").modal('toggle');\n  }\n}\n\n// CONCATENATED MODULE: ./src/globals.js\n\n\n\n\nlet humanDateFormat;\nlet humanDateFormatOnlyDate;\nlet humanDateFormatOnlyTime;\n\nlet d3TimeParseIsoWithMil;\n\nlet textDialog;\n\nlet commandList;\nlet sessionTimeline;\n\nfunction init(){\n  humanDateFormat = d3.timeFormat(\"%Y-%m-%d %H:%M\");\n  humanDateFormatOnlyDate = d3.timeFormat(\"%Y-%m-%d\");\n  humanDateFormatOnlyTime = d3.timeFormat(\"%H:%M\");\n\n  d3TimeParseIsoWithMil = d3.timeParse(\"%Y-%m-%dT%H:%M:%S.%L\");\n\n  textDialog = new GenericTextDialog();\n}\n\n// CONCATENATED MODULE: ./src/command_manipulation.js\n\n\n\n/**\n * Parse the command-date into d3's date and assign session colors\n * @param {[Command]} commands\n */\nfunction prepareCommands(commands){\n  commands.forEach(function(cmd) {\n    cmd.startTime = d3TimeParseIsoWithMil(cmd.startTime);\n    cmd.endTime = d3TimeParseIsoWithMil(cmd.endTime);\n  });\n  _fillCommandSessionColors(commands);\n}\n\n/**\n * Can be passed to array.sort or similar functions.\n * @param {*} cmd1 \n * @param {*} cmd2 \n * @return {int}\n */\nfunction compareStartDates(cmd1, cmd2) {\n  return cmd1.startTime - cmd2.startTime;\n}\n\n/**\n * Can be passed to array.sort or similar functions.\n * @param {*} cmd1 \n * @param {*} cmd2 \n * @return {int}\n */\nfunction compareEndDates(cmd1, cmd2) {\n  return cmd1.endTime - cmd2.endTime;\n}\n\n\n/**\n * Assign session-colors to the given commands.\n * Each session gets a specific color, after n sessions occurred, colors\n * start from beginning again.\n *  @param {[Command]} commands\n*/\nfunction _fillCommandSessionColors(commands){\n  const DISTINCT_COLORS = [\n    '#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#46f0f0',\n    '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8',\n    '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080',\n  ];\n  let lastColorIdx = 0;\n  const sessionColorMap = new Map();\n  
commands.forEach(function(cmd) {\n    if(cmd.sessionUuid === null){\n      cmd.sessionColor = '#000000';\n    } else {\n      let color = sessionColorMap.get(cmd.sessionUuid);\n      if(color === undefined){\n        color = DISTINCT_COLORS[lastColorIdx];\n        sessionColorMap.set(cmd.sessionUuid, color);\n        lastColorIdx++;\n        if(lastColorIdx >= DISTINCT_COLORS.length){\n          lastColorIdx = 0;\n        }\n      }\n      cmd.sessionColor = color;\n    }\n  });\n}\n\n// CONCATENATED MODULE: ./src/html_util.js\n\n\nfunction insertAfter(newNode, referenceNode) {\n  referenceNode.parentNode.insertBefore(newNode, referenceNode.nextSibling);\n}\n\n\n/**\n * Check if element is visible inside container - also partially at your wish.\n * @return {boolean}\n * @param {Element} element \n * @param {Element} container \n * @param {boolean} partial if true, return true, if not completely but partially\n * visible\n */\nfunction isScrolledIntoView(element, container, partial) {\n   // Get container properties\n   const cTop = container.scrollTop;\n   const cBottom = cTop + container.clientHeight;\n\n   // Get element properties\n   const eTop = element.offsetTop;\n   const eBottom = eTop + element.clientHeight;\n\n   // Check if in view    \n   const isTotal = (eTop >= cTop && eBottom <= cBottom);\n   const isPartial = partial && (\n     (eTop < cTop && eBottom > cTop) ||\n     (eBottom > cBottom && eTop < cBottom)\n   );\n\n   return (isTotal || isPartial);\n}\n\n// CONCATENATED MODULE: ./src/util.js\n\nclass ErrorNotImplemented extends Error { \n  constructor() {\n    super('Required method not implemented');\n  }\n}\n\nfunction sleep(ms) {\n  return new Promise((resolve) => setTimeout(resolve, ms));\n}\n\nfunction getTime() {\n  return new Date().getTime();\n}\n\nfunction date_max(d1, d2){\n  return d1 > d2 ? d1 : d2;\n}\n\nfunction date_min(d1, d2){\n  return d1 < d2 ? 
d1 : d2;\n}\n\nfunction windowWidth() {\n  return window.innerWidth ||\n    document.documentElement.clientWidth ||\n    document.body.clientWidth;\n}\n\n\nfunction windowHeight() {\n  return window.innerHeight ||\n    document.documentElement.clientHeight ||\n    document.body.clientHeight;\n}\n\n\nfunction assert(condition, message) {\n  if (!condition){\n    throw Error('Assert failed: ' + (message || ''));\n  }\n}\n\nconst DATE_MIN = new Date(-8640000000000000);\n\n/**\n * non-blocking .foreach array loop.\n * @param {*} array \n * @param {*} func \n */\nasync function timedForEach(array, func) {\n  const maxTimePerChunk = 200; // max 200ms until next sleep\n  function getTime() {\n    return new Date().getTime();\n  }\n  \n  let lastStart = getTime();\n  for (let i=0; i < array.length; i++) {\n    func(array[i], i, array); \n    const now = getTime();\n    if(now - lastStart > maxTimePerChunk){\n      // enough computation time used\n      await sleep(5);\n      lastStart = now;\n    }\n  }\n}\n\n\n/**\n * Binary search.\n * @param {[]} ar sorted array, may contain duplicate elements.\n * If there are more than one equal elements in the array,\n * the returned value can be the index of any one of the equal elements.\n * @param {*} el element to search for\n * @param {function}  compareFn  A comparator function. The function takes two arguments: (a, b) and returns:\n *        a negative number  if a is less than b;\n *        0 if a is equal to b;\n *        a positive number of a is greater than b.\n * @param {boolean} clipIdx see @return: \n * @return {int} if clipIdx is false: index of of the element in a sorted array or (-n-1) where n\n * is the insertion point for the new element. 
\n * If clipIdx is true: return an index within the array element bounds, independent of\n * wheter the element exists or not (the best matching existing index is returned).\n */\nfunction binarySearch(ar, el, compareFn, clipIdx=false) {\n  const clipIdxIfOn = (idx) => {\n    if(! clipIdx){\n      return idx;\n    }\n    if (idx < 0) {\n      idx = -(idx + 1);\n    }\n    if (idx >= ar.length) {\n      return ar.length - 1;\n    }\n    return idx;\n  };\n  \n  let m = 0;\n  let n = ar.length - 1;\n  while (m <= n) {\n    const k = (n + m) >> 1;\n    const cmp = compareFn(el, ar[k]);\n    if (cmp > 0) {\n      m = k + 1;\n    } else if(cmp < 0) {\n      n = k - 1;\n    } else {\n      return clipIdxIfOn(k);\n    }\n  }\n  return clipIdxIfOn(-m - 1);\n}\n\n/**\n * Get the directry of a unix path, e.g. the path /home/user/foo\n * would return /home/user.\n * @return {String}\n * @param {String} path \n */\nfunction getDirFromAbsPath(path){\n  return path.substring(0,path.lastIndexOf(\"/\"));\n}\n\n\n\n// CONCATENATED MODULE: ./src/conversions.js\n\n/**\n * @return {String} human readble byte-size-string\n * @param {int} bytes \n * @param {boolean} si if true: use 1000 as base (kB), else 1024 (KiB)\n */\nfunction bytesToHuman(bytes, si = false) {\n  const thresh = si ? 
1000 : 1024;\n  if (Math.abs(bytes) < thresh) {\n    return bytes + ' B';\n  }\n  const units = si ?\n   ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] :\n   ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'];\n  let u = -1;\n  do {\n    bytes /= thresh;\n    ++u;\n  } while (Math.abs(bytes) >= thresh && u < units.length - 1);\n  return bytes.toFixed(1) + ' ' + units[u];\n}\n\n// CONCATENATED MODULE: ./src/command_list.js\n\n\n\n\n\n\nclass command_list_CommandList {\n  constructor(commands) {\n\n    this._CMDLISTPADDING = 18;\n    this._CMDLISTBG = '#777';\n\n    const cmdListHeight = (() => {\n      const boundClient = sessionTimeline.getSvg().node().getBoundingClientRect();\n      let h = windowHeight() - (boundClient.y + boundClient.height) - 30; // why minus 30?\n      if (h < 200) {\n        // screen too small (or too many command groups): allow for scrolling\n        h = 300;\n      }\n      return h;\n    })();\n\n    const cmdListScroll = d3.select('body').append('div')\n      .attr('id', 'cmdListScroll')\n      .style('height', cmdListHeight + 'px');\n\n\n    cmdListScroll.selectAll('.collapsibleCmd')\n      .data(commands)\n      .enter()\n      .append('button')\n      .attr('class', 'collapsibleCmd')\n      .attr('id', (cmd) => { return 'cmdListEntry' + cmd.id; })\n      .html((cmd) => {\n        // only display year,month,day of endTime if different from start\n        const actualEndFormat = (cmd.startTime.getFullYear() == cmd.endTime.getFullYear() &&\n          cmd.startTime.getMonth() == cmd.endTime.getMonth() &&\n          cmd.startTime.getDay() == cmd.endTime.getDay()\n        ) ? 
humanDateFormatOnlyTime : humanDateFormat;\n        return humanDateFormat(cmd.startTime) + ' - ' +\n          actualEndFormat(cmd.endTime) + ': ' +\n          cmd.command;\n      })\n      .style('padding', this._CMDLISTPADDING + 'px')\n      .style('background', (cmd) => { return this._computeCmdBackground(cmd); })\n      .on(\"click\", (cmd, idx) => {\n        if (document.readyState !== \"complete\"){\n          // silently ignore clicks, until everything loaded...\n          return;\n        }\n\n        this._handleClickOnCmd(cmd, idx);\n      });\n  }\n\n  /**\n  * @param {*} cmd command-object to scroll to\n  */\n  scrollToCmd(cmd) {\n    const cmdElement = this._selectCmdEntry(cmd);\n    const scroll = document.getElementById('cmdListScroll');\n    scroll.scrollTop = cmdElement.node().offsetTop;\n\n    cmdElement\n      .transition()\n      .duration(1300) // miliseconds\n      .style(\"background\", \"red\")\n      .on(\"end\", () => { \n        cmdElement.style('background', (cmd) => { return this._computeCmdBackground(cmd); });\n      });\n  }\n\n  _selectCmdEntry(cmd){\n    return d3.select(`#cmdListEntry${cmd.id}`);\n  }\n\n  _computeCmdBackground(cmd){\n    return `linear-gradient(to right,\n      ${cmd.sessionColor} 0px, ${cmd.sessionColor} ${this._CMDLISTPADDING - 1}px,\n      ${this._CMDLISTBG} ${this._CMDLISTPADDING - 1}px, ${this._CMDLISTBG} 100%)`;\n  }\n\n  _handleClickOnCmd(cmd, idx){\n    let contentDiv = d3.select(`#cmdcontent${cmd.id}`);\n    if (! 
contentDiv.empty()) {\n      contentDiv.remove();\n      return;\n    }\n\n    contentDiv = d3.select('body').append('div')\n      .attr('id', `cmdcontent${cmd.id}`)\n      .attr('class', 'collapsibleCmdContent')\n      .html(`Working directory: ${cmd.workingDir}<br>` +\n        `Command exit status: ${cmd.returnValue}<br>` +\n        `Session uuid: ${cmd.sessionUuid}<br>` +\n        `Command id: ${cmd.id}<br>` +\n        `Hostname: ${cmd.hostname}<br>`);\n\n    const alternatingColor = '#D9D9D9';\n    \n    if (cmd.fileWriteEvents_length > 0) {\n      contentDiv.append('span')\n        .html(cmd.fileWriteEvents_length + ' written files')\n        .style('color', 'red')\n        .style('display', 'block');\n      \n      contentDiv.selectAll('.nonexistentClass')\n        .data(cmd.fileWriteEvents)\n        .enter()\n        .append('span')\n        .style('display', 'block')\n        .style('background-color', (e, idx) => {\n          return (idx % 2 === 0) ? 'transparent' : alternatingColor;\n        })\n        .text((e) => {\n          return `${e.path} (${bytesToHuman(e.size)}), Hash: ${e.hash}`;\n        });\n\n      if (cmd.fileWriteEvents.length !== cmd.fileWriteEvents_length) {\n        contentDiv.append('span').html(\n          `... and ` +\n          `${cmd.fileWriteEvents_length - cmd.fileWriteEvents.length}` +\n          ` more (see shournal's query help to increase limits)<br>`);\n      }\n    }\n\n    if (cmd.fileReadEvents_length > 0) {\n      if(cmd.fileWriteEvents_length > 0){\n         contentDiv.append('span').html('<br>');\n      }\n      contentDiv.append('span')\n      .html(cmd.fileReadEvents_length + ' read files')\n      .style('color', 'red')\n      .style('display', 'block');\n    }\n \n    contentDiv.selectAll('.nonexistentClass')\n      .data(cmd.fileReadEvents)\n      .enter()\n      .append('span')\n      .style('background-color', (e, idx) => {\n        return (idx % 2 === 0) ? 
'transparent' : alternatingColor;\n      })\n      .style('color', (readFile) => { return (readFile.isStoredToDisk) ? 'blue' : 'black'; })\n      .style('cursor', (readFile) => { return (readFile.isStoredToDisk) ? 'pointer' : 'default'; })\n      .style('display', 'block') // only one read file per line\n      .text((e) => { return `${e.path} (${bytesToHuman(e.size)}), Hash: ${e.hash}`; })\n      .on(\"click\", (readFile) => {\n        if (readFile.isStoredToDisk) {\n          const mtimeHuman = humanDateFormat(d3.isoParse(readFile.mtime));\n          const title = `Read file ${readFile.path}<br>` +\n                        `mtime: ${mtimeHuman}<br>` +\n                        `size: ${bytesToHuman(readFile.size)}<br>` + \n                        `hash: ${readFile.hash}<br>`;\n\n          const readFileContent = atob(readFileContentMap.get(readFile.id));\n          textDialog.show(title, readFileContent);\n        }\n      });\n    \n    if (cmd.fileReadEvents.length !== cmd.fileReadEvents_length) {\n      contentDiv.append('span').html(\n        `... and ` +\n        `${cmd.fileReadEvents_length - cmd.fileReadEvents.length}` +\n        ` more (see shournal's query help to increase limits)<br>`\n      );\n    }\n\n      \n    const cmdElement = this._selectCmdEntry(cmd);\n    insertAfter(contentDiv.node(), cmdElement.node());\n\n    const cmdListScroll = document.getElementById('cmdListScroll');\n    if(! 
isScrolledIntoView(contentDiv.node(), cmdListScroll, true)){\n      // scroll down one element, so at least the beginning of content is visible:\n      cmdListScroll.scrollTop += cmdElement.node().clientHeight;  \n    } \n  }\n}\n\n// CONCATENATED MODULE: ./src/map_extended.js\n\n\nclass MapExtended extends Map {\n  \n  /**\n   * Like get() but insert and return a default, if the key\n   * does not exist\n   * @return {*} \n   * @param {*} key \n   * @param {Function} defaultFactory A parameterless function whose return value\n   * is used as default.\n   */\n  getDefault(key, defaultFactory) {\n    if(defaultFactory === undefined){\n      throw Error('defaultValue must not be undefined');\n    }\n    let val = this.get(key);\n    if(val === undefined){\n      val = defaultFactory();\n      this.set(key, val);\n    }\n    return val;\n  }\n\n}\n\n// CONCATENATED MODULE: ./node_modules/tinyqueue/index.js\n\nclass TinyQueue {\n    constructor(data = [], compare = defaultCompare) {\n        this.data = data;\n        this.length = this.data.length;\n        this.compare = compare;\n\n        if (this.length > 0) {\n            for (let i = (this.length >> 1) - 1; i >= 0; i--) this._down(i);\n        }\n    }\n\n    push(item) {\n        this.data.push(item);\n        this.length++;\n        this._up(this.length - 1);\n    }\n\n    pop() {\n        if (this.length === 0) return undefined;\n\n        const top = this.data[0];\n        const bottom = this.data.pop();\n        this.length--;\n\n        if (this.length > 0) {\n            this.data[0] = bottom;\n            this._down(0);\n        }\n\n        return top;\n    }\n\n    peek() {\n        return this.data[0];\n    }\n\n    _up(pos) {\n        const {data, compare} = this;\n        const item = data[pos];\n\n        while (pos > 0) {\n            const parent = (pos - 1) >> 1;\n            const current = data[parent];\n            if (compare(item, current) >= 0) break;\n            data[pos] = current;\n    
        pos = parent;\n        }\n\n        data[pos] = item;\n    }\n\n    _down(pos) {\n        const {data, compare} = this;\n        const halfLength = this.length >> 1;\n        const item = data[pos];\n\n        while (pos < halfLength) {\n            let left = (pos << 1) + 1;\n            let best = data[left];\n            const right = left + 1;\n\n            if (right < this.length && compare(data[right], best) < 0) {\n                left = right;\n                best = data[right];\n            }\n            if (compare(best, item) >= 0) break;\n\n            data[pos] = best;\n            pos = left;\n        }\n\n        data[pos] = item;\n    }\n}\n\nfunction defaultCompare(a, b) {\n    return a < b ? -1 : a > b ? 1 : 0;\n}\n\n// CONCATENATED MODULE: ./src/timeline_group_find.js\n\n\n\n\n/**\n * Find \"groups\" in an ordered timeline, so that parallel \n * events get different (low) groups (integers starting from zero). \n * Events are defined by start- and end-date. The container, for\n * whose elements findNextFreeGroup may be called subsequentially,\n * must be ordered by start-date.\n */\nclass timeline_group_find_TimelineGroupFind {\n\n  constructor(){\n    this._lastEndDates = [];\n    this._freeGroups = new TinyQueue();\n  }\n\n  /**\n   * @return {int} lowest free group, starting from 0.\n   * @param {Date} startDate start date of the next time element \n   * @param {Date} endDate end date of the next time element\n   */\n  findNextFreeGroup(startDate, endDate){\n    for (let i = this._lastEndDates.length - 1; i >= 0; i--) {\n      if (startDate > this._lastEndDates[i].endTime) {\n        this._freeGroups.push(this._lastEndDates[i].group);\n        this._lastEndDates.splice(i, 1);\n      }\n    }\n    // if we have free groups (from previous runs) use the lowest free group, \n    // else add a new one\n    const group = (this._freeGroups.length > 0) ? 
this._freeGroups.pop() : \n      this._lastEndDates.length;\n    this._lastEndDates.push(new _LastEndDateGroup(group, endDate));\n    return group;\n  }\n}\n\n\nclass _LastEndDateGroup {\n  constructor(group, endTime){\n    this.group = group;\n    this.endTime = endTime;\n  }\n}\n\n// EXTERNAL MODULE: ./node_modules/async-mutex/lib/index.js\nvar lib = __webpack_require__(0);\n\n// CONCATENATED MODULE: ./src/annotation_line_render.js\n\n\n\n\n\n\n/**\n * Render Groups of annotations on a per-line-basis. Clip annotation texts \n * and omit annotations as needed to fit into available space\n */\nclass annotation_line_render_AnnotationLineRender {\n  constructor(plot) {\n    this._annotationGroups = [];\n    this._plot = plot;\n    // get the width in pixel of a character\n    this._annotationCharWidth = d3.select(\"#annotation_text_char\").node()\n      .getBoundingClientRect().width;\n    // do not render an annotation which does not fit into the space.\n    this._annotationMinWidth = this._annotationCharWidth * 2;\n    // clip annotation-texts after that many characters\n    this._annotationMaxNumChars = 15;\n    this._updateMutex = new lib[\"Mutex\"]();\n    this._lastUpdateDummy = null;\n  }\n\n  /**\n   * \n   * @param {Array<Annotation>} group: ordered set of annotations which will be rendered\n   * within the same line. Base class is the same as d3 annotation, however, the following\n   * *additional* fields must be set: startX, endX, fulltext. 
The annotation position\n   * (x,y) has to be set already, based on the x-values it is decided, how much of\n   * an annotation is drawn.\n   */\n  addAnnotationGroup(group) {\n    this._annotationGroups.push(group);\n  }\n\n\n  async update(xScale) {\n    this._lastUpdateDummy = {};\n    const currentUpdateDummy = this._lastUpdateDummy;\n\n    const release = await this._updateMutex.acquire();\n    try {\n      // remove and add again seems to be faster than updating\n      this._plot.selectAll('.annotation').remove();\n      this._plot.selectAll('.annotationVertLine').remove();\n      this._plot.selectAll('.annotationHorizLine').remove();\n\n      const annotations = await this._preRenderAnnotations(xScale, currentUpdateDummy);\n      if (annotations !== null) {\n        this._appendAnnotations(annotations);\n      }\n    } finally {\n      release();\n    }\n  } \n\n  setOnNoteClick(func){\n    this._onNoteClick = func;\n  }\n\n  // ***************** PRIVATE ********************\n\n  _compareStartX(prev, current) {\n    return prev.startX - current.startX;\n  }  \n\n  _compareEndX(prev, current) {\n    return prev.endX - current.endX;\n  }  \n\n  async _preRenderAnnotations(xScale, currentUpdateDummy ) {  \n    \n    const annotations = [];\n    // uniform interface for binary search, where the entrance indeces are found\n    const dummyAnnotation = {\n      startX: xScale.domain()[0],\n      endX: xScale.domain()[1],\n    };\n\n    const plotWidth = this._plot.node().getBBox().width;\n    for(const annotationLine of this._annotationGroups) {\n      if (annotationLine.length == 0) {\n        continue;\n      }\n      // Do not render annotations outside the current view\n      // -> find start and stop indeces in the group:\n      // Note: one cannot simply choose 0 and length -1 after zooming\n      // out, because panning also has to be respected.\n      const startIdx = binarySearch(annotationLine, dummyAnnotation, \n        this._compareStartX, true);\n      
const endIdx = binarySearch(annotationLine, dummyAnnotation, \n        this._compareEndX, true);\n\n      let displayAnnotation = annotationLine[startIdx];\n      displayAnnotation.x = this._calcAnnotationCenter(displayAnnotation, xScale);\n\n      for (let idx = startIdx + 1; idx <= endIdx; idx++) {\n        // this.update is run async: check if it was called in between. If that's the\n        // case we can abort, because or xScale is outdated.\n        if (currentUpdateDummy !== this._lastUpdateDummy) {\n          return null;\n        }  \n        \n        if(idx % 30 === 0){\n          // avoid freezing the DOM...\n          await sleep(5);\n        }        \n\n        const annotation = annotationLine[idx];\n        annotation.x = this._calcAnnotationCenter(annotation, xScale);\n\n        const textspace = annotation.x - displayAnnotation.x -\n          (this._annotationCharWidth * 2); // subtract more chars to leave space to next annotation\n        const annotationTxt = this._generateAnnotationTxt(textspace, displayAnnotation.fulltext);\n        if (annotationTxt == null) {\n          // do not render this annotation\n          continue;\n        }\n        // always update text, we might have zoomed before!\n        displayAnnotation.note.label = annotationTxt;\n        annotations.push(displayAnnotation);\n        displayAnnotation = annotation;\n      }\n\n      // still need to push the final annotation, if it fits into our plot\n      const textspace = plotWidth - displayAnnotation.x;\n      const annotationTxt = this._generateAnnotationTxt(textspace, displayAnnotation.fulltext);\n      if (annotationTxt != null) {\n        displayAnnotation.note.label = annotationTxt;\n        annotations.push(displayAnnotation);\n      }\n    }\n    return annotations;\n  }\n\n  _calcAnnotationCenter(annotation, xScale) {\n    return (xScale(annotation.startX) + xScale(annotation.endX)) / 2.0;\n  }\n\n    /**\n   * @param {*} textspace Available width in pixel\n   
* @param {*} txt The full text\n   * @return {*} null, if textspace was too small, else the full or clipped text\n   */\n  _generateAnnotationTxt(textspace, txt) {\n    if (textspace < this._annotationMinWidth) {\n      return null;\n    }\n\n    // Render only so many chars that fit into the space, but not more than\n    // _annotationMaxNumChars;\n    const maxCountOfRenderChars = Math.min(Math.ceil(textspace / this._annotationCharWidth) , \n      this._annotationMaxNumChars);\n    \n    if (txt.length <= maxCountOfRenderChars ) {\n      return txt;\n    }\n    return txt.substring(0, maxCountOfRenderChars - 1) + '.';\n  }\n\n\n  /**\n   * Append all annotations to the plot and setup mouse event handlers\n   * @param {[annotation]} annotations\n   */\n  _appendAnnotations(annotations) {\n    const enterSelection = this._plot.selectAll(\".annotation\")\n      .data(annotations)\n      .enter();\n\n    enterSelection\n      .append(\"text\")\n      .attr('class', 'annotation unselectable' )\n      .attr('x', (a) => { return a.x; })\n      .attr('y', (a) => { return a.ny; })\n      .text((a) => { return a.note.label; })\n      .attr('title', (a) => { return a.fulltext; })\n      .style('cursor', 'pointer')\n      .on(\"click\", (a) => {\n        if (this._onNoteClick !== undefined) {\n          // d3.event.pageX, d3.event.pageY\n          this._onNoteClick(a.data);\n        }\n      });\n\n    // dynamically inserted elements -> rerun tooltip\n    $('.annotation').tooltip({\n      delay: { show: 100, hide: 0 },\n    });\n\n    const horzLineYOffset = 2;  \n\n    const lineColor = 'steelblue';\n\n    enterSelection\n      .insert(\"line\")\n      .attr('class', 'annotationVertLine')\n      .attr('x1', (a) => { return a.x; })\n      .attr('y1', (a) => { return a.ny + horzLineYOffset; })\n      .attr('x2', (a) => { return a.x; })\n      .attr('y2', (a) => { return a.y; })\n      .attr(\"stroke-width\", 0.5)\n      .attr(\"stroke\", lineColor);\n      \n    
enterSelection\n      .insert(\"line\")\n      .attr('class', 'annotationHorizLine')\n      .attr('x1', (a) => { return a.x; })\n      .attr('y1', (a) => { return a.ny + horzLineYOffset; })\n      .attr('x2', (a) => { return a.x+ (a.note.label.length * this._annotationCharWidth); })\n      .attr('y2', (a) => { return a.ny + horzLineYOffset; })\n      .attr(\"stroke-width\", 0.5)\n      .attr(\"stroke\", lineColor);   \n\n  }\n\n\n}\n\n// CONCATENATED MODULE: ./src/zoom_buttons.js\n\n\nclass ZoomButtons {\n  \n  /**\n   * @param {d3-element} containerDiv The plot/svg is excepted to be in that div. \n   * Its 'position' should be 'relative', see https://stackoverflow.com/a/10487329\n   * so we can place the buttons in an absolute manner.\n   * @param {d3-element} zoomArea the element used for zooming\n   * @param {d3.zoom} d3Zoom \n   */\n  constructor(containerDiv, zoomArea, d3Zoom) {\n    const btnGroup = containerDiv.append('div');\n\n    const zoomInBtn = this._appendZoomButton(btnGroup, '+')\n      .on(\"click\", () => {\n        d3Zoom.scaleBy(zoomArea.transition().duration(10), 1.2);\n      });\n    const zoomInBtnWidth = parseInt(zoomInBtn.style('width'), 10);\n\n    const zoomOutBtn = this._appendZoomButton(btnGroup, '-')\n      .on(\"click\", () => {\n        d3Zoom.scaleBy(zoomArea.transition().duration(10), 0.8);\n      });\n    const zoomOutBtnWidth = parseInt(zoomOutBtn.style('width'), 10);\n\n    const zoomResetBtn = this._appendZoomButton(btnGroup, '[ ]')\n      .on(\"click\", () => {\n        d3Zoom.transform(zoomArea, d3.zoomIdentity.translate(0, 0).scale(1.0));\n      });\n    const zoomResetBtnWidth = parseInt(zoomResetBtn.style('width'), 10);\n\n    const zoomButtonsWidth = zoomInBtnWidth + zoomOutBtnWidth + zoomResetBtnWidth;\n\n    btnGroup.style('position', 'absolute') // see https://stackoverflow.com/a/10487329 -> \n                                           // parent position should be relative\n      .style('top', 0 + 'px')\n      
.style('right', ( zoomButtonsWidth) + 'px');\n      \n  }\n\n  _appendZoomButton(container, text) {\n    return container.append('button')\n      .attr('class', 'zoomButton')\n      .html(text);\n  }\n}\n\n// CONCATENATED MODULE: ./src/session_timeline.js\n\n\n\n\n\n\n\nclass session_timeline_SessionTimeline {\n  constructor(commands, cmdFinalEndDate) {\n    this.cmdFinalEndDate = cmdFinalEndDate;\n\n    this._margin = {\n      top: 20,\n      right: 20,\n      bottom: 24,\n      left: 24,\n    };\n\n    // get the width in pixel of a character\n    this.annotationCharWidth = d3.select(\"#annotation_text_char\").node()\n      .getBoundingClientRect().width;\n    this.annotationCharHeight = d3.select(\"#annotation_text_char\").node()\n      .getBoundingClientRect().height;\n\n    // height of a session with no forks (parallel commands )\n    this.sessionBaseHeight = this.annotationCharHeight / 1.5;\n    this.sessionPadding = this.annotationCharHeight / 5;\n    // choose less than two, so two parallel commands\n    // are already wider than a lonely command.\n    this.sessionMinHeight = this.sessionBaseHeight / 1.5;\n\n    // An annotation shall only be displayed, if its minimum width in pixel\n    // is at least 5 character. Warning: do not set < 1 -> text rendering issues for annotations\n    this.annotationMinWidth = this.annotationCharWidth * 5;\n    // distance to the belonging command rect\n    this.annotationDistance = this.annotationCharHeight / 3.0;\n    this.commandRects = [];\n\n    \n    // minimum width of a cmd-rect. 
Let it be at least 1, otherwise very short commands\n    // are barely visible (get another color...)\n    this.CMD_MIN_WIDTH = 4;\n\n    this.svgWidth = windowWidth() - this._margin.left - this._margin.right - 30;\n\n    const plotContainer = d3.select('body').append('div')\n      .style('position', 'relative'); // see https://stackoverflow.com/a/10487329\n\n    this.svg = plotContainer.append('svg');\n    this._annotationRender = new annotation_line_render_AnnotationLineRender(this.svg);\n\n    const groupedSessions = this._generateCommandsPerSession(commands);\n    this.svgHeight = Math.max(100, this._prerenderSessions(groupedSessions));\n\n    this.xScale = d3.scaleTime()\n      .range([0, this.svgWidth]);\n\n    this._yScale = d3.scaleLinear()\n      .range([this.svgHeight, 0]);\n    this._yScale.domain([0, this.svgHeight]);\n\n    this.axisBottom = d3.axisBottom(this.xScale);\n\n    this.svg.attr('width', this._margin.left + this.svgWidth + this._margin.right)\n    .attr('height', this._margin.top + this.svgHeight + this._margin.bottom)\n    .append('g')\n    .attr('transform', 'translate(' + this._margin.left + ',' + this._margin.top + ')')\n    .style('z-index', -1);\n\n    const listenerRect = this.svg\n      .append('rect')\n      .attr('class', 'listener-rect')\n      .attr('x', 0)\n      .attr('y', -this._margin.top)\n      .attr('width', this._margin.left + this.svgWidth + this._margin.right)\n      .attr('height', this._margin.top + this.svgHeight + this._margin.bottom)\n      .style('opacity', 0);\n\n\n    this.xScale.domain([\n      // the commands are sorted by starttime...\n      commands[0].startTime,\n      this.cmdFinalEndDate,\n    ]).nice();\n\n    // draw axes\n    this.xAxisDraw = this.svg.insert('g', ':first-child')\n      .attr('class', 'x axis')\n      .attr('transform', 'translate(0,' + this.svgHeight + ')')\n      .call(this.axisBottom\n        // .ticks(d3.timeWeek, 2)\n        // .tickFormat(d3.timeFormat('%b %d'))\n      );\n\n    
const _drawSession = (session, idx, lineIdx) => {\n      // draw rectangles\n      const className = 'sessionTimeSeries' + \n        session.getSessionGroup() + idx;\n      this.commandRects.push(this.svg.selectAll('.' + className)\n        .data(session.getCmdsWithMeta())\n        .enter()\n        .append('rect')\n        .attr('class', className)\n        .attr('x', (cmdWithMeta) => { \n          return this._calcRectXPosition(cmdWithMeta.cmd, this.xScale); \n        })\n        .attr('y', (cmdWithMeta) => { \n          // rects are drawn from top to bottom, so add the height:\n          return this._yScale(cmdWithMeta.getY() + cmdWithMeta.getHeight()); \n        })\n        .attr('width', (cmdWithMeta) => { \n          return this._calcRectWidth(cmdWithMeta.cmd, this.xScale); \n        })\n        .attr('height', (cmdWithMeta) => { \n          return cmdWithMeta.getHeight(); \n        })\n        .attr('fill', (cmdWithMeta) => { \n          // TODO: rather determine the session color in this class\n          // on a per line-basis, so the same color appears as seldom\n          // as possible in a given line (?).\n          // But what about the colors in the cmd-list?...\n          return cmdWithMeta.cmd.sessionColor; \n        } )\n        .style('cursor', 'pointer')\n        .attr('title', (cmdWithMeta) => { return cmdWithMeta.cmd.command; })\n        .on(\"click\", (cmdWithMeta) => { \n          commandList.scrollToCmd(cmdWithMeta.cmd); \n        })\n        );\n      $('.' 
+ className).tooltip({\n        delay: { show: 50, hide: 0 },\n      });\n\n    }; \n\n    groupedSessions.forEach((sessionLine, lineIdx) => {\n      sessionLine.forEach((session, sessionIdx) => {\n        _drawSession(session, sessionIdx, lineIdx);\n      });\n    });   \n\n\n    this._preRenderAnnotations(groupedSessions);\n    this._annotationRender.setOnNoteClick((cmdWithMeta) => {\n      commandList.scrollToCmd(cmdWithMeta.cmd);\n    });\n    this._annotationRender.update(this.xScale);\n        \n\n    const minTimeMilli = 20000; // do not allow zooming beyond displaying 20 seconds\n    const maxTimeMilli = 6.3072e+11; // approx 20 years\n\n    const currentWidthMilli = cmdFinalEndDate - commands[0].startTime;\n\n    const minScaleFactor = currentWidthMilli / maxTimeMilli;\n    const maxScaleFactor = currentWidthMilli / minTimeMilli;\n\n    const zoom = d3.zoom()\n      // .scaleExtent([0.001, 5000])\n      .scaleExtent([minScaleFactor, maxScaleFactor])\n      .on(\"zoom\", () => {\n        this._handleZoom(d3.event.transform);\n      });\n\n    this._zoomButtons = new ZoomButtons(plotContainer, listenerRect, zoom);\n\n    listenerRect.call(zoom);\n  }\n\n  getSvg(){\n    return this.svg;\n  }\n\n  _generateCommandsPerSession(commands) {\n    const assignParallelCmdCounts = (commandsPerSession) => {\n      // find out the number of parallel commands in each session and store it \n      // in the meta-info of each cmd. The groups are already assigned, one command\n      // is parallel to another, if there exists at least one command\n      // between two zero-group-commands. 
Note that the groups of\n      // those in-between-commands may rise and fall arbitrarily often,\n      // so keep track of the max.\n      commandsPerSession.forEach((session) => {\n        // index in the sessions cmd-array, where the last group 0 was seen\n        let lastZeroGroupIdx = 0;\n        let lastHighestGroup = 0;\n        // yes, <= to simplify handling the final command\n        for (let i = 1; i <= session.getCmdsWithMeta().length; i++) {\n          if (i >= session.getCmdsWithMeta().length || \n              session.getCmdsWithMeta()[i].getGroup() === 0) {\n            // a new group has started or we are at end. Assign the found number of parallel\n            // commands to all affected commands:\n            const countOfParallelCmds = lastHighestGroup + 1; // zero based..\n\n            for (let j = lastZeroGroupIdx; j < i; j++) {\n              session.getCmdsWithMeta()[j].setCountOfParallelGroups(countOfParallelCmds);\n            }\n            // also keep track of the max number of parallel commands in this session\n            // for later use\n            session.setMaxCountOfParallelCommands(\n              Math.max(session.getMaxCountOfParallelCommands(), countOfParallelCmds)\n            );\n\n            lastZeroGroupIdx = i;\n            lastHighestGroup = 0;\n          } else {\n            // keep track of the highest group\n            lastHighestGroup = Math.max(lastHighestGroup, \n              session.getCmdsWithMeta()[i].getGroup());\n          }\n        }\n      });\n    };\n\n    const commandsPerSession = new MapExtended();\n\n    commands.forEach( (cmd) => {\n      // note: Map()' iteration order is the insert order, which is\n      // desired here -> since the command-array is ordered by startDateTime,\n      // the generated session map is also ordered by startDateTime\n      const session = commandsPerSession.getDefault(cmd.sessionUuid, \n        () => { return new session_timeline_Session(); });\n      
session.addCmd(cmd);\n    });\n\n    assignParallelCmdCounts(commandsPerSession);\n\n    // assign a group to each session\n    const sessionGrpFind = new timeline_group_find_TimelineGroupFind();\n    let maxGroup = 0;\n    commandsPerSession.forEach((session) => {\n      const group = sessionGrpFind.findNextFreeGroup(session.getSessionStartDate(),\n        session.getSessionEndDate());\n\n      session.setSessionGroup(group);\n      maxGroup = Math.max(maxGroup, group);\n    });\n\n    // generate an array of an array of sessions, so all sessions which have\n    // the same group are in one array (in correct order).\n    // That way one 'line' of sessions can be \n    // drawn easily.\n    const groupedSessions = new Array(maxGroup + 1);\n    for (let i = 0; i < groupedSessions.length; i++) {\n      groupedSessions[i] = [];\n    }\n    commandsPerSession.forEach( (session) => {\n      groupedSessions[session.getSessionGroup()].push(session);\n    });\n\n    return groupedSessions;\n  }\n\n\n  /**\n   * @return {int} max y offset of the plot\n   * @param {*} groupedSessions \n   */\n  _prerenderSessions(groupedSessions){\n    const ANNOTATION_AND_PADDING = this.annotationDistance + \n      this.annotationCharHeight * 1.5; // * 1.5 -> give some more space\n\n    const _prerenderCmd = (cmdWithMeta, currentOffset, sessionHeight) => {\n      if(cmdWithMeta.getCountOfParallelGroups() === 1){\n        // non-parallel commands are aligned to session center:\n        cmdWithMeta.setHeight(this.sessionBaseHeight);\n        const y = currentOffset + sessionHeight/2 - this.sessionBaseHeight/2;\n        cmdWithMeta.setY(y);\n        return;\n      }\n      // parallel commands expand in equal parts over the whole sessionHeight \n      // (separated by padding)\n      let cmdHeight = sessionHeight / cmdWithMeta.getCountOfParallelGroups();\n      if(cmdHeight < this.sessionMinHeight){\n        cmdHeight = this.sessionMinHeight;\n      } else {\n        cmdHeight -= 
this.sessionPadding;\n      }\n      cmdWithMeta.setHeight(cmdHeight);\n      const y = currentOffset + (cmdHeight + this.sessionPadding) * cmdWithMeta.getGroup();\n      cmdWithMeta.setY(y);\n    };\n\n    let currentOffset = 0;\n    groupedSessions.forEach((sessionLine, lineIdx) => {\n      // find the max. number of parallel commands in all sessions of the current line:\n      const maxNumberOfParallelCmds = sessionLine.reduce((prev, curr) => {\n        return prev.getMaxCountOfParallelCommands() > curr.getMaxCountOfParallelCommands() ?\n         prev : curr;\n      }).getMaxCountOfParallelCommands();    \n      const sessionHeight = maxNumberOfParallelCmds === 1 ?\n       this.sessionBaseHeight :\n       (this.sessionMinHeight + this.sessionPadding) * maxNumberOfParallelCmds;\n\n      sessionLine.forEach((session) => {\n        session.getCmdsWithMeta().forEach((cmdWithMeta) => {\n          _prerenderCmd(cmdWithMeta, currentOffset, sessionHeight);\n        });\n        session.setHeight(sessionHeight);\n        session.setY(currentOffset);\n      });\n      currentOffset += sessionHeight + ANNOTATION_AND_PADDING;\n    });    \n    return currentOffset;    \n  }\n\n  _preRenderAnnotations(groupedSessions){\n    groupedSessions.forEach((sessionLine) => { \n      const annotationGroup = [];\n      sessionLine.forEach((session) => {\n        session.getCmdsWithMeta().forEach((cmdWithMeta) => {\n          // only create annotations for the topmost commandgroup \n          // (in case of parallel commands)\n          if(cmdWithMeta.getCountOfParallelGroups() === cmdWithMeta.getGroup() + 1){\n            annotationGroup.push(this._createAnnotation(cmdWithMeta, \n              session.getY() + session.getHeight() + this.annotationDistance ));\n          }\n        });\n      });\n      this._annotationRender.addAnnotationGroup(annotationGroup);\n    });    \n  }\n  \n\n  _createAnnotation(cmdWithMeta, y){\n    return {\n      data: cmdWithMeta,\n      note: {\n        
align: \"left\", \n        wrap: 'nowrap',\n        // title: \"Annotation title\"\n      },\n      dx: 0,\n      ny: this.svgHeight - y,\n      y: this.svgHeight - (cmdWithMeta.getY() + cmdWithMeta.getHeight()),\n      startX: cmdWithMeta.cmd.startTime,\n      endX: cmdWithMeta.cmd.endTime,\n      fulltext: cmdWithMeta.cmd.command,\n    };\n  }\n\n\n  _calcRectXPosition(cmd, xScale) {\n    let startX = xScale(cmd.startTime);\n    const w = xScale(cmd.endTime) - startX;\n    if (w < this.CMD_MIN_WIDTH) {\n      // since a cmd has to have at least that width, but shall be\n      // centered anyway:\n      const center = startX + w / 2.0;\n      startX = center - this.CMD_MIN_WIDTH / 2.0;\n    }\n    return startX;\n  }\n\n\n  _calcRectWidth(cmd, xScale) {\n    const w = xScale(cmd.endTime) - xScale(cmd.startTime);\n    if (w < this.CMD_MIN_WIDTH) {\n      return this.CMD_MIN_WIDTH;\n    }\n    return w;\n  }\n\n\n  _handleZoom(transform) {\n    const xScaleNew = transform.rescaleX(this.xScale);\n\n    this.axisBottom.scale(xScaleNew);\n    this.xAxisDraw.call(\n      this.axisBottom\n      // .ticks(d3.timeWeek, 2)\n      // .tickFormat(d3.timeFormat('%b %d'))\n    );\n    // maybe_todo: execute in parallel...\n    this.commandRects.forEach((rectGroup) => {\n      rectGroup.attr('x', (cmdWithMeta) => {\n        const pos = this._calcRectXPosition(cmdWithMeta.cmd, xScaleNew);\n        // note: pos may be less than zero which is ok, because\n        // otherwise wide rects may disappear too soon.\n        return pos;\n        })\n        .attr('width', (cmdWithMeta) => {\n          return this._calcRectWidth(cmdWithMeta.cmd, xScaleNew);\n        });\n    });\n\n    this._annotationRender.update(xScaleNew);\n  }\n}\n\n\nclass _CommandWithMeta{\n  /**\n   * \n   * @param {Command} cmd \n   * @param {int} group the group assigned within a session\n   */\n  constructor(cmd, group){\n    this.cmd = cmd;\n    this._group = group;\n    this._countOfParallelGroups = -1;\n    
this._height = 1000;\n    this._y = 0;\n    this._annotation = null;\n  }\n\n  getGroup(){\n    return this._group;\n  }\n\n  setCountOfParallelGroups(val){\n    this._countOfParallelGroups = val;\n  }\n\n  getCountOfParallelGroups(){\n    return this._countOfParallelGroups;\n  }\n\n  setHeight(val){\n    this._height = val;\n  }\n\n  getHeight(){\n    return this._height;\n  }\n\n  setY(val){\n    this._y = val;\n  }\n\n  getY(){\n    return this._y;\n  }\n\n  setAnnotation(val){\n    this._annotation = val;\n  }\n\n  getAnnotation(){\n    return this._annotation;\n  }\n\n}\n\nclass session_timeline_Session {\n  constructor() {\n    this._cmdsWithMeta = [];\n    this._finalCmdEndDate = DATE_MIN;\n    this._groupFind = new timeline_group_find_TimelineGroupFind();\n    this._firstCmdStartDate = null;\n    this._sessionGroup = null;\n    this._maxCountOfParallelCmds = null;\n    this._height = null;\n  }\n\n  /**\n   * The passed commands *must* be sorted (asc) by startTime during\n   * subsequent calls of this method.\n   * @param {Command} cmd \n   */\n  addCmd(cmd) {\n    if(this._firstCmdStartDate === null){\n      // commands are sorted by startTime and we are called the first time.\n      this._firstCmdStartDate = cmd.startTime;\n    }\n    // commands are sorted by startTime but the first executed cmd may well finish\n    // last, so incrementally find the final endDate.\n    this._finalCmdEndDate = date_max(cmd.endTime, this._finalCmdEndDate);\n\n    const group = this._groupFind.findNextFreeGroup(cmd.startTime, cmd.endTime);\n    this._cmdsWithMeta.push(new _CommandWithMeta(cmd, group));\n\n  }\n\n  setMaxCountOfParallelCommands(val){\n    this._maxCountOfParallelCmds = val;\n  }\n\n  getMaxCountOfParallelCommands(){\n    return this._maxCountOfParallelCmds;\n  }\n\n\n  getSessionStartDate(){\n    return this._firstCmdStartDate;\n  }\n\n  getSessionEndDate(){\n    return this._finalCmdEndDate;\n  }\n\n  setSessionGroup(val){\n    this._sessionGroup = val;\n  
}\n\n  getSessionGroup(){\n    return this._sessionGroup;\n  }\n\n  getCmdsWithMeta(){\n    return this._cmdsWithMeta;\n  }\n\n  setHeight(val){\n    this._height = val;\n  }\n\n  getHeight(){\n    return this._height;\n  }\n\n  setY(val){\n    this._y = val;\n  }\n\n  getY(){\n    return this._y;\n  }\n\n\n}\n\n// CONCATENATED MODULE: ./src/d3js_util.js\n\n\n/**\n * Wrap long axis labels to mutliple lines by maximum width, splitting words by \n * the given *delimeter-keeping* splitStr and auto-truncating long words.\n * Leading and trailing whitespaces of each line are trimmed.\n * @param {[String]} tickTexts \n * @param {int} width The max. width in pixels for each label \n * @param {RegExp} splitStr A regular expression for the split-string, \n * which keeps the delimeter, e.g. /(?=\\s)/.\n */\nfunction wrapTextLabels(tickTexts, width, splitStr=/(?=\\s)/) {\n  tickTexts.each(function() {\n    const text = d3.select(this);\n    const words = text.text().split(splitStr);\n    let line = [];\n    let lineNumber = 0;\n    const lineHeight = 1.1; // ems\n    const y = text.attr(\"y\");\n    const dy = parseFloat(text.attr(\"dy\"));\n    let tspan = text.text(null).append(\"tspan\").attr(\"x\", 0).attr(\"y\", y)\n      .attr(\"dy\", dy + \"em\");\n    // only increment i, if the word can be for sure drawn to current line\n    for(let i=0; i < words.length; ) {\n      line.push(words[i]);\n      tspan.text(line.join(''));\n      if (tspan.node().getComputedTextLength() <= width) {\n        ++i;\n        continue;\n      }\n\n      if (line.length === 1) {\n        // this single word is too long to fit into a line -> clip it\n        _truncateAndSetLabelTxt(line[0].trim(), tspan, width);\n        ++i;    \n      } else {\n        // this word does not fit any more -> put it to next line \n        // and render all others\n        line.pop();\n        tspan.text(line.join('').trim());\n        \n        // NO ++i -> the current word must be rendered in next line\n      
} \n      tspan = text.append(\"tspan\").attr(\"x\", 0).attr(\"y\", y)\n        .attr(\"dy\", `${++lineNumber * lineHeight + dy}em`).text(null);\n      line = [];\n    }\n  });\n}\n\n\nfunction _truncateAndSetLabelTxt(labelTxt, tspan, width) {\n  do {\n    labelTxt = labelTxt.slice(0, -3);\n    tspan.text(labelTxt);\n  } while (tspan.node().getComputedTextLength() > width);\n  labelTxt = labelTxt.slice(0, -2);\n  labelTxt += '..';\n  tspan.text(labelTxt);\n}\n\n// CONCATENATED MODULE: ./src/plot_simple_bar.js\n\n\n\n\n\n/**\n * Base class for several bar plots\n */\nclass plot_simple_bar_PlotSimpleBar {\n  constructor() {\n    this._margin = { top: 20, right: 20, bottom: 60, left: 40 };\n    this._width = 500 - this._margin.left - this._margin.right;\n    this._height = 300 - this._margin.top - this._margin.bottom;\n\n    this._maxBarWidth = 30;\n  }\n\n  generatePlot(data, siblingElement) {\n    const plotContainer = siblingElement.append('div')\n      .style('position', 'relative')\n      .style('padding', '12px')\n      .style('display', 'inherit');\n      \n\n    this._svg = plotContainer.append(\"svg\")\n      .attr(\"width\", this._width + this._margin.left + this._margin.right)\n      .attr(\"height\", this._height + this._margin.top + this._margin.bottom)\n      .append(\"g\")\n      .attr(\"transform\",\n        \"translate(\" + this._margin.left + \",\" + this._margin.top + \")\");\n\n    // chart title\n    const chartTitle = this._svg.append(\"text\")\n      .attr(\"x\", (this._width / 2.0))\n      .attr(\"y\", -3)\n      .attr(\"text-anchor\", \"middle\")\n      .style(\"font-size\", \"16px\")\n      .style(\"text-decoration\", \"underline\")\n      .text(this._chartTitle());\n\n    this._xScaleBand = d3.scaleBand()\n      .range([0, this._width])\n      .padding(0.1);\n    this._yScaleBand = d3.scaleLinear()\n      // leave some space for the char title:\n      .range([this._height, chartTitle.node().getBoundingClientRect().height * 1.2]);\n\n    // 
In case of duplicate x-axis label values they are overridden, which should\n    // never be desired. Instead build a range and access the respective data-array-element\n    // by index.\n    this._xScaleBand.domain(d3.range(data.length));\n    this._yScaleBand.domain(this._yScaleBandDomain());\n\n    const actualBandWidth = (this._xScaleBand.bandwidth() > this._maxBarWidth) ? \n      this._maxBarWidth :\n      this._xScaleBand.bandwidth();\n\n\n    // append the rectangles for the bar chart\n\n    const dataEnterSelection = this._svg.selectAll(\".bar\").data(data).enter();\n\n    const bars = dataEnterSelection.append(\"rect\")\n      .style('fill',(d, i) => { return this._barColor(d); })\n      .attr(\"x\", (d, i) => { \n        let x = this._xScaleBand(i);\n        const center = x + this._xScaleBand.bandwidth()/2.0;\n        x = center - actualBandWidth/2.0;\n        return x;\n       })\n      .attr(\"width\", actualBandWidth) \n      .attr(\"y\", (d) => { return this._yScaleBand(this._yValue(d)); })\n      .attr(\"height\", (d) => { return this._height - this._yScaleBand(this._yValue(d)); })\n      .attr('data-toggle', 'tooltip')\n      .attr('title', (d) => { return this._barTooltipTxt(d); });\n  \n    this._modifyBars(bars);\n      \n    // add the x Axis\n    this._svg.append(\"g\")\n      .attr(\"transform\", \"translate(0,\" + this._height + \")\")\n      .call(d3.axisBottom(this._xScaleBand).tickFormat((d,i)=> this._xValue(data[i])))\n      .selectAll(\"text\")\n      .call((tickTexts) => {\n        const thisPlot = this;\n        tickTexts.each(function (plainTxt, idx) {\n          const text = d3.select(this);\n          text.attr(\"title\", function () {\n            return thisPlot._xAxisTooltipTxt.call(thisPlot, data[idx]);\n          }).attr('data-toggle', 'tooltip')\n            .attr('data-placement', 'left');\n          thisPlot._modifyTickText(text, data[idx]);  \n        });\n\n        wrapTextLabels(tickTexts, \n          
this._xScaleBand.bandwidth(), \n          this._xTxtLabelSplitStr());  \n      });\n\n    // add the y Axis\n    const yAxisTicks = this._yScaleBand.ticks()\n      .filter((tick) => { return this._yAxisTicksFilter(tick); });\n    this._yaxis = d3.axisLeft(this._yScaleBand);\n    const yTickFormat = this._yAxisTickFormat();\n    if(yTickFormat !== undefined){\n      this._yaxis.tickValues(yAxisTicks).tickFormat( yTickFormat );\n    }\n\n    this._svg.append(\"g\").call(this._yaxis);      \n  }\n\n  \n  // MUST override methods\n  _chartTitle(){ throw new ErrorNotImplemented(); }\n  _yScaleBandDomain(){ throw new ErrorNotImplemented(); }\n  // Is called for each x-value\n  _xValue(d){ throw new ErrorNotImplemented(); }\n  // Is called for each y-value\n  _yValue(d){ throw new ErrorNotImplemented(); }\n\n  // MAY override methods\n  _yAxisTicksFilter(tick){ return true; }\n  _yAxisTickFormat() { return undefined; }\n  _modifyTickText(tickTxt, data) {}\n\n  _xTxtLabelSplitStr() { return /(?=\\s)/; }  \n  _barTooltipTxt(dataElement){\n    return this._xValue(dataElement);\n  }\n  _xAxisTooltipTxt(dataElement){\n    return this._xValue(dataElement);\n  }\n  _barColor(dataElement){\n    return 'steelblue';\n  }\n  // apply further modifications to the bars\n  _modifyBars(bars){}\n\n}\n\n\n// CONCATENATED MODULE: ./src/plot_most_written_files.js\n\n\n\n\n\n\n/**\n * A bar plot displaying the commands which\n * modified the most files.\n */\nclass plot_most_written_files_PlotMostWrittenFiles extends plot_simple_bar_PlotSimpleBar {\n\n  generatePlot(commands, siblingElement){\n    this._filteredCmds = [];\n    mostFileMods.forEach((e) => {\n      this._filteredCmds.push(commands[e.idx]);\n    });\n    this._maxCountOfWfileEvents = this._filteredCmds[0].fileWriteEvents_length;\n\n    // Be consistent with timeline and sort by date:\n    this._filteredCmds.sort(compareStartDates);\n\n    super.generatePlot(this._filteredCmds, siblingElement);\n\n  }\n  /**\n   * @override\n   
*/\n  _chartTitle(){ return 'Commands with most file-modifications'; }\n\n  /**\n   * @override\n   */  \n  _yScaleBandDomain(){ return [0, this._maxCountOfWfileEvents]; }\n\n  /**\n   * @override\n   */  \n  _xValue(cmd) {\n    return humanDateFormatOnlyDate(cmd.startTime) + \": \" +\n      cmd.command;\n  }  \n\n  /**\n   * @override\n   */  \n  _yValue(cmd) {\n    return cmd.fileWriteEvents_length;\n  }  \n\n  /**\n   * @override\n   */  \n  _yAxisTicksFilter(tick){ return Number.isInteger(tick); }\n\n  /**\n   * @override\n   */  \n  _yAxisTickFormat() { return d3.format('d'); }\n\n\n  _barColor(cmd){\n    return cmd.sessionColor;\n  }\n\n  _modifyBars(bars){\n    bars\n      .style('cursor', 'pointer')\n      .on(\"click\", (cmd) => { \n        commandList.scrollToCmd(cmd); \n      });\n  }\n\n  _modifyTickText(tickTxt, cmd) {\n    tickTxt\n      .style('cursor', 'pointer')\n      .on(\"click\", () => { \n        commandList.scrollToCmd(cmd); \n      });\n  }\n}\n\n\n\n// CONCATENATED MODULE: ./src/plot_cmdcount_per_cwd.js\n\n\n\n\n/**\n * A bar plot displaying the working directories\n * where the most commands were executed.\n */\nclass plot_cmdcount_per_cwd_PlotCmdCountPerCwd extends plot_simple_bar_PlotSimpleBar {\n\n  generatePlot(commands, siblingElement){\n    super.generatePlot(cwdCmdCounts, siblingElement);\n\n  }\n  /**\n   * @override\n   */\n  _chartTitle(){ return 'Working directories with most commands'; }\n\n\n  /**\n   * @override\n   */  \n  _yScaleBandDomain(){ return [0, cwdCmdCounts[0].countOfCommands]; }\n\n  /**\n   * @override\n   */  \n  _xValue(cwdCmdCount) {\n    return cwdCmdCount.workingDir;\n  }  \n\n  /**\n   * @override\n   */  \n  _yValue(cwdCmdCount) {\n    return cwdCmdCount.countOfCommands;\n  }  \n\n  /**\n   * @override\n   */  \n  _yAxisTicksFilter(tick){ return Number.isInteger(tick); }\n\n  /**\n   * @override\n   */  \n  _yAxisTickFormat() { return d3.format('d'); }\n\n\n  _xTxtLabelSplitStr() { return /(?=\\/)/; 
}\n\n}\n\n\n\n// CONCATENATED MODULE: ./src/plot_io_per_dir.js\n\n\n\n\n/**\n * A bar plot displaying directories\n * with most IO-activity.\n */\nclass plot_io_per_dir_PlotIoPerDir extends plot_simple_bar_PlotSimpleBar {\n\n  generatePlot(commands, siblingElement){\n    super.generatePlot(dirIoCounts, siblingElement);\n\n  }\n  /**\n   * @override\n   */\n  _chartTitle(){ return 'Directories with most input-output-activity'; }\n\n\n  /**\n   * @override\n   */  \n  _yScaleBandDomain(){ return [0, dirIoCounts[0].readCount + dirIoCounts[0].writeCount]; }\n\n  /**\n   * @override\n   */  \n  _xValue(ioStat) {\n    return ioStat.dir;\n  }  \n\n  /**\n   * @override\n   */  \n  _yValue(ioStat) {\n    return ioStat.readCount + ioStat.writeCount;\n  }  \n\n  /**\n   * @override\n   */  \n  _yAxisTicksFilter(tick){ return Number.isInteger(tick); }\n\n  /**\n   * @override\n   */  \n  _yAxisTickFormat() { return d3.format('d'); }\n  \n  /**\n   * @override\n   */  \n  _xTxtLabelSplitStr() { return /(?=\\/)/; }\n}\n\n// CONCATENATED MODULE: ./src/plot_cmdcount_per_session.js\n\n\n\n\n\n/**\n * A bar plot displaying the sessions\n * wherein the most commands were executed.\n */\nclass plot_cmdcount_per_session_PlotCmdCountPerSession extends plot_simple_bar_PlotSimpleBar {\n\n  generatePlot(commands, siblingElement) {\n      this._sessionMostCmds = [];\n      sessionsMostCmds.forEach((e) => {\n        this._sessionMostCmds.push(\n          new _SessionMostCmdsEntry(commands[e.idxFirstCmd], e.countOfCommands)\n          );\n      });\n      this._maxCountOfCmdsInSession = this._sessionMostCmds[0].countOfCommands;\n    \n    // sort the sessions by start date\n    this._sessionMostCmds.sort((s1, s2) => {\n      return s1.firstCmd.startTime - s2.firstCmd.startTime;\n    });\n\n    super.generatePlot(this._sessionMostCmds, siblingElement);\n\n  }\n  /**\n   * @override\n   */\n  _chartTitle(){ return 'Sessions with most commands'; }\n\n\n  /**\n   * @override\n   */  \n  
_yScaleBandDomain(){ return [0, this._maxCountOfCmdsInSession]; }\n\n  /**\n   * @override\n   */  \n  _xValue(session) {\n    return session.firstCmd.sessionUuid;\n  }  \n\n  /**\n   * @override\n   */  \n  _yValue(session) {\n    return session.countOfCommands;\n  }  \n\n  /**\n   * @override\n   */  \n  _yAxisTicksFilter(tick){ return Number.isInteger(tick); }\n\n  /**\n   * @override\n   */  \n  _yAxisTickFormat() { return d3.format('d'); }\n\n\n  /**\n   * @return {int}\n   * @param {[Command]} cmds1 \n   * @param {[Command]} cmds2 \n   */\n  _compareBySessionCmdCount(cmds1, cmds2) {\n    return cmds1.length - cmds2.length;\n  }\n\n  _barColor(session){\n    return session.firstCmd.sessionColor;\n  }\n\n  _modifyBars(bars){\n    bars\n      .style('cursor', 'pointer')\n      .on(\"click\", (session) => { \n        commandList.scrollToCmd(session.firstCmd); \n      });\n  }\n\n  _modifyTickText(tickTxt, session) {\n    tickTxt\n      .style('cursor', 'pointer')\n      .on(\"click\", () => { \n        commandList.scrollToCmd(session.firstCmd); \n      });\n  }\n\n}\n\n\nclass _SessionMostCmdsEntry {\n  constructor(firstCmd, countOfCommands){\n    this.firstCmd = firstCmd;\n    this.countOfCommands = countOfCommands;\n  }\n}\n\n\n\n// CONCATENATED MODULE: ./src/stats.js\n\n\n\n\n\n\n\n\nasync function generateMiscStats() {\n  const body = d3.select('body');\n\n  if (typeof commands[0].fileWriteEvents === 'undefined') {\n    // when generating from shournal, command-data (like fileWriteEvents)\n    // is loaded later for performance reasons\n    await timedForEach(commands, (cmd, idx) => {\n      const cmdDataTag = d3.select('#commandDataJSON' + idx);\n      const cmdData = JSON.parse(cmdDataTag.html());\n      Object.assign(cmd, cmdData);\n      cmdDataTag.remove();\n    });\n  }\n\n  if (mostFileMods.length === 0 && sessionsMostCmds.length === 0 && \n      cwdCmdCounts.length === 0 && dirIoCounts.length === 0) {\n    // No stats to display...\n    return;\n  
}\n\n  body.append('h3')\n    .html('Miscellaneous statistics')\n    .style('padding-top', '1em');\n\n  const miscStatElement = body.append('div')\n    .style('padding-top', '20px')\n    .style('display', 'inline-block');\n\n  if (mostFileMods.length > 0) {\n    const plotMostWrittenFiles = new plot_most_written_files_PlotMostWrittenFiles();\n    plotMostWrittenFiles.generatePlot(commands, miscStatElement);\n  }  \n  \n  if (sessionsMostCmds.length > 0) {\n    const plotCmdCountPerSession = new plot_cmdcount_per_session_PlotCmdCountPerSession();\n    plotCmdCountPerSession.generatePlot(commands, miscStatElement);\n  }\n\n  if(cwdCmdCounts.length > 0){\n    const plotCmdCountPerCwd = new plot_cmdcount_per_cwd_PlotCmdCountPerCwd();\n    plotCmdCountPerCwd.generatePlot(commands, miscStatElement);\n  }\n \n  if (dirIoCounts.length > 0) {\n    const plotIoPerDir = new plot_io_per_dir_PlotIoPerDir();\n    plotIoPerDir.generatePlot(commands, miscStatElement);\n  }\n\n  $('[data-toggle=\"tooltip\"]').tooltip({\n    delay: { show: 300, hide: 0 },\n  });\n}\n\n// CONCATENATED MODULE: ./src/index.js\n\n\n\n\n\n\n\n\nfunction displayErrorAtTop(msg){\n  // vanilla js, since loading of libraries might have failed\n  const errEl = document.getElementById('topError');\n  errEl.style[\"visibility\"] = \"visible\";\n  errEl.innerHTML = msg;\n}\n\n\nfunction main() {\n  if (scriptLoadError) {\n    console.log(scriptLoadError);\n    displayErrorAtTop(scriptLoadError);\n    return;\n  }\n\n  init();\n\n  assert(commands.length > 0, 'commands.length > 0');\n\n  const queryDate = d3TimeParseIsoWithMil(ORIGINAL_QUERY_DATE_STR);\n  const body = d3.select('body');\n\n  body.append('button')\n    .attr('class', 'btn btn-primary')\n    .style('position', 'absolute')\n    .style('right', '0px')\n    .style('top', '0px')\n    .html(\"Report Metadata\")\n    .on(\"click\", () => {\n      textDialog.show(\"Report Metadata\", \n        `Commandline-query (executed on ` + \n        
`${humanDateFormat(queryDate)}): ${ORIGINAL_QUERY}`);\n    });\n\n  prepareCommands(commands);\n\n  {\n    let lastStart = commands[0].startTime;\n    for(let i=1; i < commands.length; i++){\n      assert(commands[i].startTime >= lastStart);\n      lastStart = commands[i].startTime;\n    }\n  }\n  \n\n  const cmdFinalEndDate = d3TimeParseIsoWithMil(CMD_FINAL_ENDDATE_STR);\n \n  // Do not change order -> commandList.size computed based on sessionTimeLine.size.\n  sessionTimeline = new session_timeline_SessionTimeline(commands, cmdFinalEndDate);\n  commandList = new command_list_CommandList(commands);\n  \n  d3.select('#initialSpinner').remove();\n  $(document).ready(generateMiscStats);\n}\n\n\ntry {\n  main();\n} catch (error) {\n  console.log(error);\n  displayErrorAtTop(error);\n}\n\n\n/***/ })\n/******/ ]);"
  },
  {
    "path": "html-export/dist/main.licenses.txt",
    "content": "async-mutex\nMIT\nThe MIT License (MIT)\n\nCopyright (c) 2016 Christian Speckner <cnspeckn@googlemail.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\ntinyqueue\nISC\nISC License\n\nCopyright (c) 2017, Vladimir Agafonkin\n\nPermission to use, copy, modify, and/or distribute this software for any purpose\nwith or without fee is hereby granted, provided that the above copyright notice\nand this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS\nOF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER\nTORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF\nTHIS SOFTWARE.\n"
  },
  {
    "path": "html-export/package.json",
    "content": "{\n  \"name\": \"shournal-html-stats\",\n  \"version\": \"1.0.0\",\n  \"description\": \"interactively browse shournal's command history\",\n  \"private\": true,\n  \"scripts\": {\n    \"build\": \"webpack\"\n  },\n  \"keywords\": [],\n  \"author\": \"Tycho Kirchner\",\n  \"license\": \"GPL-3.0\",\n  \"devDependencies\": {\n    \"eslint\": \"^6.5.1\",\n    \"eslint-config-google\": \"^0.14.0\",\n    \"license-webpack-plugin\": \"^2.1.3\",\n    \"webpack\": \"^4.41.1\",\n    \"webpack-cli\": \"^3.3.9\"\n  },\n  \"dependencies\": {\n    \"async-mutex\": \"^0.1.4\",\n    \"tinyqueue\": \"^2.0.3\"\n  }\n}\n"
  },
  {
    "path": "html-export/src/annotation_line_render.js",
    "content": "\n\nimport * as util from './util';\nimport {sleep} from './util';\nimport {Mutex} from 'async-mutex';\n\n/**\n * Render Groups of annotations on a per-line-basis. Clip annotation texts \n * and omit annotations as needed to fit into available space\n */\nexport default class AnnotationLineRender {\n  constructor(plot) {\n    this._annotationGroups = [];\n    this._plot = plot;\n    // get the width in pixel of a character\n    this._annotationCharWidth = d3.select(\"#annotation_text_char\").node()\n      .getBoundingClientRect().width;\n    // do not render an annotation which does not fit into the space.\n    this._annotationMinWidth = this._annotationCharWidth * 2;\n    // clip annotation-texts after that many characters\n    this._annotationMaxNumChars = 15;\n    this._updateMutex = new Mutex();\n    this._lastUpdateDummy = null;\n  }\n\n  /**\n   * \n   * @param {Array<Annotation>} group: ordered set of annotations which will be rendered\n   * within the same line. Base class is the same as d3 annotation, however, the following\n   * *additional* fields must be set: startX, endX, fulltext. 
The annotation position\n   * (x,y) has to be set already, based on the x-values it is decided, how much of\n   * an annotation is drawn.\n   */\n  addAnnotationGroup(group) {\n    this._annotationGroups.push(group);\n  }\n\n\n  async update(xScale) {\n    this._lastUpdateDummy = {};\n    const currentUpdateDummy = this._lastUpdateDummy;\n\n    const release = await this._updateMutex.acquire();\n    try {\n      // remove and add again seems to be faster than updating\n      this._plot.selectAll('.annotation').remove();\n      this._plot.selectAll('.annotationVertLine').remove();\n      this._plot.selectAll('.annotationHorizLine').remove();\n\n      const annotations = await this._preRenderAnnotations(xScale, currentUpdateDummy);\n      if (annotations !== null) {\n        this._appendAnnotations(annotations);\n      }\n    } finally {\n      release();\n    }\n  } \n\n  setOnNoteClick(func){\n    this._onNoteClick = func;\n  }\n\n  // ***************** PRIVATE ********************\n\n  _compareStartX(prev, current) {\n    return prev.startX - current.startX;\n  }  \n\n  _compareEndX(prev, current) {\n    return prev.endX - current.endX;\n  }  \n\n  async _preRenderAnnotations(xScale, currentUpdateDummy ) {  \n    \n    const annotations = [];\n    // uniform interface for binary search, where the entrance indeces are found\n    const dummyAnnotation = {\n      startX: xScale.domain()[0],\n      endX: xScale.domain()[1],\n    };\n\n    const plotWidth = this._plot.node().getBBox().width;\n    for(const annotationLine of this._annotationGroups) {\n      if (annotationLine.length == 0) {\n        continue;\n      }\n      // Do not render annotations outside the current view\n      // -> find start and stop indeces in the group:\n      // Note: one cannot simply choose 0 and length -1 after zooming\n      // out, because panning also has to be respected.\n      const startIdx = util.binarySearch(annotationLine, dummyAnnotation, \n        this._compareStartX, true);\n 
     const endIdx = util.binarySearch(annotationLine, dummyAnnotation, \n        this._compareEndX, true);\n\n      let displayAnnotation = annotationLine[startIdx];\n      displayAnnotation.x = this._calcAnnotationCenter(displayAnnotation, xScale);\n\n      for (let idx = startIdx + 1; idx <= endIdx; idx++) {\n        // this.update is run async: check if it was called in between. If that's the\n        // case we can abort, because or xScale is outdated.\n        if (currentUpdateDummy !== this._lastUpdateDummy) {\n          return null;\n        }  \n        \n        if(idx % 30 === 0){\n          // avoid freezing the DOM...\n          await sleep(5);\n        }        \n\n        const annotation = annotationLine[idx];\n        annotation.x = this._calcAnnotationCenter(annotation, xScale);\n\n        const textspace = annotation.x - displayAnnotation.x -\n          (this._annotationCharWidth * 2); // subtract more chars to leave space to next annotation\n        const annotationTxt = this._generateAnnotationTxt(textspace, displayAnnotation.fulltext);\n        if (annotationTxt == null) {\n          // do not render this annotation\n          continue;\n        }\n        // always update text, we might have zoomed before!\n        displayAnnotation.note.label = annotationTxt;\n        annotations.push(displayAnnotation);\n        displayAnnotation = annotation;\n      }\n\n      // still need to push the final annotation, if it fits into our plot\n      const textspace = plotWidth - displayAnnotation.x;\n      const annotationTxt = this._generateAnnotationTxt(textspace, displayAnnotation.fulltext);\n      if (annotationTxt != null) {\n        displayAnnotation.note.label = annotationTxt;\n        annotations.push(displayAnnotation);\n      }\n    }\n    return annotations;\n  }\n\n  _calcAnnotationCenter(annotation, xScale) {\n    return (xScale(annotation.startX) + xScale(annotation.endX)) / 2.0;\n  }\n\n    /**\n   * @param {*} textspace Available width in 
pixel\n   * @param {*} txt The full text\n   * @return {*} null, if textspace was too small, else the full or clipped text\n   */\n  _generateAnnotationTxt(textspace, txt) {\n    if (textspace < this._annotationMinWidth) {\n      return null;\n    }\n\n    // Render only so many chars that fit into the space, but not more than\n    // _annotationMaxNumChars;\n    const maxCountOfRenderChars = Math.min(Math.ceil(textspace / this._annotationCharWidth) , \n      this._annotationMaxNumChars);\n    \n    if (txt.length <= maxCountOfRenderChars ) {\n      return txt;\n    }\n    return txt.substring(0, maxCountOfRenderChars - 1) + '.';\n  }\n\n\n  /**\n   * Append all annotations to the plot and setup mouse event handlers\n   * @param {[annotation]} annotations\n   */\n  _appendAnnotations(annotations) {\n    const enterSelection = this._plot.selectAll(\".annotation\")\n      .data(annotations)\n      .enter();\n\n    enterSelection\n      .append(\"text\")\n      .attr('class', 'annotation unselectable' )\n      .attr('x', (a) => { return a.x; })\n      .attr('y', (a) => { return a.ny; })\n      .text((a) => { return a.note.label; })\n      .attr('title', (a) => { return a.fulltext; })\n      .style('cursor', 'pointer')\n      .on(\"click\", (a) => {\n        if (this._onNoteClick !== undefined) {\n          // d3.event.pageX, d3.event.pageY\n          this._onNoteClick(a.data);\n        }\n      });\n\n    // dynamically inserted elements -> rerun tooltip\n    $('.annotation').tooltip({\n      delay: { show: 100, hide: 0 },\n    });\n\n    const horzLineYOffset = 2;  \n\n    const lineColor = 'steelblue';\n\n    enterSelection\n      .insert(\"line\")\n      .attr('class', 'annotationVertLine')\n      .attr('x1', (a) => { return a.x; })\n      .attr('y1', (a) => { return a.ny + horzLineYOffset; })\n      .attr('x2', (a) => { return a.x; })\n      .attr('y2', (a) => { return a.y; })\n      .attr(\"stroke-width\", 0.5)\n      .attr(\"stroke\", lineColor);\n      \n    
enterSelection\n      .insert(\"line\")\n      .attr('class', 'annotationHorizLine')\n      .attr('x1', (a) => { return a.x; })\n      .attr('y1', (a) => { return a.ny + horzLineYOffset; })\n      .attr('x2', (a) => { return a.x+ (a.note.label.length * this._annotationCharWidth); })\n      .attr('y2', (a) => { return a.ny + horzLineYOffset; })\n      .attr(\"stroke-width\", 0.5)\n      .attr(\"stroke\", lineColor);   \n\n  }\n\n\n}\n"
  },
  {
    "path": "html-export/src/command_list.js",
    "content": "import * as html_util from './html_util';\nimport * as util from './util';\nimport * as globals from './globals';\nimport * as conversions from './conversions';\n\n\nexport default class CommandList {\n  constructor(commands) {\n\n    this._CMDLISTPADDING = 18;\n    this._CMDLISTBG = '#777';\n\n    const cmdListHeight = (() => {\n      const boundClient = globals.sessionTimeline.getSvg().node().getBoundingClientRect();\n      let h = util.windowHeight() - (boundClient.y + boundClient.height) - 30; // why minus 30?\n      if (h < 200) {\n        // screen too small (or too many command groups): allow for scrolling\n        h = 300;\n      }\n      return h;\n    })();\n\n    const cmdListScroll = d3.select('body').append('div')\n      .attr('id', 'cmdListScroll')\n      .style('height', cmdListHeight + 'px');\n\n\n    cmdListScroll.selectAll('.collapsibleCmd')\n      .data(commands)\n      .enter()\n      .append('button')\n      .attr('class', 'collapsibleCmd')\n      .attr('id', (cmd) => { return 'cmdListEntry' + cmd.id; })\n      .html((cmd) => {\n        // only display year,month,day of endTime if different from start\n        const actualEndFormat = (cmd.startTime.getFullYear() == cmd.endTime.getFullYear() &&\n          cmd.startTime.getMonth() == cmd.endTime.getMonth() &&\n          cmd.startTime.getDay() == cmd.endTime.getDay()\n        ) ? 
globals.humanDateFormatOnlyTime : globals.humanDateFormat;\n        return globals.humanDateFormat(cmd.startTime) + ' - ' +\n          actualEndFormat(cmd.endTime) + ': ' +\n          cmd.command;\n      })\n      .style('padding', this._CMDLISTPADDING + 'px')\n      .style('background', (cmd) => { return this._computeCmdBackground(cmd); })\n      .on(\"click\", (cmd, idx) => {\n        if (document.readyState !== \"complete\"){\n          // silently ignore clicks, until everything loaded...\n          return;\n        }\n\n        this._handleClickOnCmd(cmd, idx);\n      });\n  }\n\n  /**\n  * @param {*} cmd command-object to scroll to\n  */\n  scrollToCmd(cmd) {\n    const cmdElement = this._selectCmdEntry(cmd);\n    const scroll = document.getElementById('cmdListScroll');\n    scroll.scrollTop = cmdElement.node().offsetTop;\n\n    cmdElement\n      .transition()\n      .duration(1300) // miliseconds\n      .style(\"background\", \"red\")\n      .on(\"end\", () => { \n        cmdElement.style('background', (cmd) => { return this._computeCmdBackground(cmd); });\n      });\n  }\n\n  _selectCmdEntry(cmd){\n    return d3.select(`#cmdListEntry${cmd.id}`);\n  }\n\n  _computeCmdBackground(cmd){\n    return `linear-gradient(to right,\n      ${cmd.sessionColor} 0px, ${cmd.sessionColor} ${this._CMDLISTPADDING - 1}px,\n      ${this._CMDLISTBG} ${this._CMDLISTPADDING - 1}px, ${this._CMDLISTBG} 100%)`;\n  }\n\n  _handleClickOnCmd(cmd, idx){\n    let contentDiv = d3.select(`#cmdcontent${cmd.id}`);\n    if (! 
contentDiv.empty()) {\n      contentDiv.remove();\n      return;\n    }\n\n    contentDiv = d3.select('body').append('div')\n      .attr('id', `cmdcontent${cmd.id}`)\n      .attr('class', 'collapsibleCmdContent')\n      .html(`Working directory: ${cmd.workingDir}<br>` +\n        `Command exit status: ${cmd.returnValue}<br>` +\n        `Session uuid: ${cmd.sessionUuid}<br>` +\n        `Command id: ${cmd.id}<br>` +\n        `Hostname: ${cmd.hostname}<br>`);\n\n    const alternatingColor = '#D9D9D9';\n    \n    if (cmd.fileWriteEvents_length > 0) {\n      contentDiv.append('span')\n        .html(cmd.fileWriteEvents_length + ' written files')\n        .style('color', 'red')\n        .style('display', 'block');\n      \n      contentDiv.selectAll('.nonexistentClass')\n        .data(cmd.fileWriteEvents)\n        .enter()\n        .append('span')\n        .style('display', 'block')\n        .style('background-color', (e, idx) => {\n          return (idx % 2 === 0) ? 'transparent' : alternatingColor;\n        })\n        .text((e) => {\n          return `${e.path} (${conversions.bytesToHuman(e.size)}), Hash: ${e.hash}`;\n        });\n\n      if (cmd.fileWriteEvents.length !== cmd.fileWriteEvents_length) {\n        contentDiv.append('span').html(\n          `... and ` +\n          `${cmd.fileWriteEvents_length - cmd.fileWriteEvents.length}` +\n          ` more (see shournal's query help to increase limits)<br>`);\n      }\n    }\n\n    if (cmd.fileReadEvents_length > 0) {\n      if(cmd.fileWriteEvents_length > 0){\n         contentDiv.append('span').html('<br>');\n      }\n      contentDiv.append('span')\n      .html(cmd.fileReadEvents_length + ' read files')\n      .style('color', 'red')\n      .style('display', 'block');\n    }\n \n    contentDiv.selectAll('.nonexistentClass')\n      .data(cmd.fileReadEvents)\n      .enter()\n      .append('span')\n      .style('background-color', (e, idx) => {\n        return (idx % 2 === 0) ? 
'transparent' : alternatingColor;\n      })\n      .style('color', (readFile) => { return (readFile.isStoredToDisk) ? 'blue' : 'black'; })\n      .style('cursor', (readFile) => { return (readFile.isStoredToDisk) ? 'pointer' : 'default'; })\n      .style('display', 'block') // only one read file per line\n      .text((e) => { return `${e.path} (${conversions.bytesToHuman(e.size)}), Hash: ${e.hash}`; })\n      .on(\"click\", (readFile) => {\n        if (readFile.isStoredToDisk) {\n          const mtimeHuman = globals.humanDateFormat(d3.isoParse(readFile.mtime));\n          const title = `Read file ${readFile.path}<br>` +\n                        `mtime: ${mtimeHuman}<br>` +\n                        `size: ${conversions.bytesToHuman(readFile.size)}<br>` + \n                        `hash: ${readFile.hash}<br>`;\n\n          const readFileContent = atob(readFileContentMap.get(readFile.id));\n          globals.textDialog.show(title, readFileContent);\n        }\n      });\n    \n    if (cmd.fileReadEvents.length !== cmd.fileReadEvents_length) {\n      contentDiv.append('span').html(\n        `... and ` +\n        `${cmd.fileReadEvents_length - cmd.fileReadEvents.length}` +\n        ` more (see shournal's query help to increase limits)<br>`\n      );\n    }\n\n      \n    const cmdElement = this._selectCmdEntry(cmd);\n    html_util.insertAfter(contentDiv.node(), cmdElement.node());\n\n    const cmdListScroll = document.getElementById('cmdListScroll');\n    if(! html_util.isScrolledIntoView(contentDiv.node(), cmdListScroll, true)){\n      // scroll down one element, so at least the beginning of content is visible:\n      cmdListScroll.scrollTop += cmdElement.node().clientHeight;  \n    } \n  }\n}\n"
  },
  {
    "path": "html-export/src/command_manipulation.js",
    "content": "\nimport * as globals from './globals';\n\n/**\n * Parse the command-date into d3's date and assign session colors\n * @param {[Command]} commands\n */\nexport function prepareCommands(commands){\n  commands.forEach(function(cmd) {\n    cmd.startTime = globals.d3TimeParseIsoWithMil(cmd.startTime);\n    cmd.endTime = globals.d3TimeParseIsoWithMil(cmd.endTime);\n  });\n  _fillCommandSessionColors(commands);\n}\n\n/**\n * Can be passed to array.sort or similar functions.\n * @param {*} cmd1 \n * @param {*} cmd2 \n * @return {int}\n */\nexport function compareStartDates(cmd1, cmd2) {\n  return cmd1.startTime - cmd2.startTime;\n}\n\n/**\n * Can be passed to array.sort or similar functions.\n * @param {*} cmd1 \n * @param {*} cmd2 \n * @return {int}\n */\nexport function compareEndDates(cmd1, cmd2) {\n  return cmd1.endTime - cmd2.endTime;\n}\n\n\n/**\n * Assign session-colors to the given commands.\n * Each session gets a specific color, after n sessions occurred, colors\n * start from beginning again.\n *  @param {[Command]} commands\n*/\nfunction _fillCommandSessionColors(commands){\n  const DISTINCT_COLORS = [\n    '#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#46f0f0',\n    '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8',\n    '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080',\n  ];\n  let lastColorIdx = 0;\n  const sessionColorMap = new Map();\n  commands.forEach(function(cmd) {\n    if(cmd.sessionUuid === null){\n      cmd.sessionColor = '#000000';\n    } else {\n      let color = sessionColorMap.get(cmd.sessionUuid);\n      if(color === undefined){\n        color = DISTINCT_COLORS[lastColorIdx];\n        sessionColorMap.set(cmd.sessionUuid, color);\n        lastColorIdx++;\n        if(lastColorIdx >= DISTINCT_COLORS.length){\n          lastColorIdx = 0;\n        }\n      }\n      cmd.sessionColor = color;\n    }\n  });\n}\n"
  },
  {
    "path": "html-export/src/command_timeline.js",
    "content": "import * as util from './util';\nimport Tooltip from './tooltip';\nimport AnnotationLineRender from './annotation_line_render';\n\nexport default class CommandTimeline {\n  constructor(commands, countOfCmdGroups,\n    cmdFinalEndDate) {\n    this.commands = commands;\n    this.countOfCmdGroups = countOfCmdGroups;\n    this.cmdFinalEndDate = cmdFinalEndDate;\n\n    this._margin = {\n      top: 20,\n      right: 20,\n      bottom: 24,\n      left: 24,\n    };\n\n    this.commandsPerGroup = this._generateCommandsPerGroup();\n\n    // get the width in pixel of a character\n    this.annotationCharWidth = d3.select(\"#annotation_text_char\").node()\n      .getBoundingClientRect().width;\n    this.annotationCharHeight = d3.select(\"#annotation_text_char\").node()\n      .getBoundingClientRect().height;\n\n    // An annotation shall only be displayed, if its minimum width in pixel\n    // is at least 5 character. Warning: do not set < 1 -> text rendering issues for annotations\n    // distance to the belonging command rect\n    this.ANNOTATION_DISTANCE = 15;\n\n    // minimum width of a cmd-rect. Let it be at least 1, otherwise very short commands\n    // are barely visible (get another color...)\n    this.CMD_MIN_WIDTH = 4;\n\n    this.TOTAL_CMD_GROUP_HEIGHT = _CmdRectHeights.VERY_MANY_MOD + this.ANNOTATION_DISTANCE +\n        this.annotationCharHeight * 2; // *2 to give some more space\n\n    this.cmdGroupOffsets = this._generateCommandGroupOffsets();\n\n    this.svgWidth = util.windowWidth() - this._margin.left - this._margin.right - 30;\n    // min. 
height, might be increased below\n    this.svgHeight = 100;\n    // If too many command-groups, increase plot size\n    if (this.svgHeight < this.cmdGroupOffsets[this.cmdGroupOffsets.length - 1] + this.TOTAL_CMD_GROUP_HEIGHT) {\n      this.svgHeight = this.cmdGroupOffsets[this.cmdGroupOffsets.length - 1] + this.TOTAL_CMD_GROUP_HEIGHT;\n    }\n\n    this.xScale = d3.scaleTime()\n      .range([0, this.svgWidth]);\n\n    this.axisBottom = d3.axisBottom(this.xScale);\n\n    this.svg = d3.select('body').append('svg')\n       .attr('width', this._margin.left + this.svgWidth + this._margin.right)\n       .attr('height', this._margin.top + this.svgHeight + this._margin.bottom)\n      .append('g')\n      .attr('transform', 'translate(' + this._margin.left + ',' + this._margin.top + ')')\n      .style('z-index', -1);\n\n    this._annotationRender = new AnnotationLineRender(this.svg);\n\n    const listenerRect = this.svg\n      .append('rect')\n      .attr('class', 'listener-rect')\n      .attr('x', 0)\n      .attr('y', -this._margin.top)\n      .attr('width', this._margin.left + this.svgWidth + this._margin.right)\n      .attr('height', this._margin.top + this.svgHeight + this._margin.bottom)\n      .style('opacity', 0);\n\n\n    this.xScale.domain([\n      // the commands are sorted by starttime...\n      this.commands[0].startTime,\n      this.cmdFinalEndDate,\n    ]).nice();\n\n    // draw axes\n    this.xAxisDraw = this.svg.insert('g', ':first-child')\n      .attr('class', 'x axis')\n      .attr('transform', 'translate(0,' + this.svgHeight + ')')\n      .call(this.axisBottom\n        // .ticks(d3.timeWeek, 2)\n        // .tickFormat(d3.timeFormat('%b %d'))\n      );\n\n    this.tooltip = new Tooltip();\n\n    // draw rectangles\n    this.commandRects = this.svg.selectAll('rect')\n      .data(commands)\n      .enter()\n      .append('rect')\n      .attr('x', (cmd) => { return this._calcRectXPosition(cmd, this.xScale); })\n      .attr('y', (cmd) => { return this.svgHeight 
- this.cmdGroupOffsets[cmd.vertOffsetGroup]; })\n      .attr('width', (cmd) => { return this._calcRectWidth(cmd, this.xScale); })\n      .attr('height', (cmd) => {\n        if(cmd.fileWriteEvents.length === 0) return _CmdRectHeights.NO_MOD;\n        if(cmd.fileWriteEvents.length < 5) return _CmdRectHeights.FEW_MOD;\n        if(cmd.fileWriteEvents.length < 15) return _CmdRectHeights.MANY_MOD;\n        return _CmdRectHeights.VERY_MANY_MOD;\n      })\n      .attr('fill', (cmd, i) => {\n        return cmd.sessionColor;\n        //  maybe_todo: mark 'sessionEnd' with a color?\n        // const p = 0.1 * 100;\n        // const grad = defs.append(\"linearGradient\")\n        //     .attr(\"id\", \"grad_\" + i);\n        // \n        // const color1 = \"orange\";\n        // const color2 = \"steelblue\";\n        // \n        // grad.append(\"stop\")\n        //   .attr(\"offset\", \"0%\")\n        //   .attr(\"stop-color\", color1);\n        // grad.append(\"stop\")\n        //   .attr(\"offset\", (p) + \"%\")\n        //   .attr(\"stop-color\", color1);\n        // grad.append(\"stop\")\n        //   .attr(\"offset\", (p) + \"%\")\n        //   .attr(\"stop-color\", color2);\n        // grad.append(\"stop\")\n        //   .attr(\"offset\", \"100%\")\n        //   .attr(\"stop-color\", color2);\n        // \n        // return \"url(#grad_\" + i + \")\";\n      })\n      .on(\"mouseover\", (cmd) => { \n        this.tooltip.show(cmd.command, d3.event.pageX, d3.event.pageY);\n       })\n      .on(\"mouseout\", () => { this.tooltip.hide(); })\n      .on(\"click\", (cmd) => { this._commandList.scrollToCmd(cmd); });\n\n    this._setupAnnotations();\n\n    const zoom = d3.zoom()\n      .scaleExtent([0.001, 5000])\n      .on(\"zoom\", () => {\n        this._handleZoom(d3.event.transform);\n      });\n    \n    listenerRect.call(zoom);\n  }\n\n  getSvg(){\n    return this.svg;\n  }\n\n  setCommandList(commandList){\n    this._commandList = commandList;\n  }\n\n  
_setupAnnotations(){\n    this.commandsPerGroup.forEach((cmdGroup) => {\n      const annotationGroup = [];\n      cmdGroup.forEach((cmd) => {\n        const annotation = {\n          data: cmd,\n          note: {\n            align: \"left\", \n            wrap: 'nowrap',\n            // title: \"Annotation title\"\n          },\n          y: this.svgHeight - this.cmdGroupOffsets[cmd.vertOffsetGroup], // TODO: use cmdGroupIdx??\n          dx: 0,\n          dy: - this.ANNOTATION_DISTANCE,\n          startX: cmd.startTime,\n          endX: cmd.endTime,\n          fulltext: cmd.command,\n        };\n        annotationGroup.push(annotation);\n      });\n      this._annotationRender.addAnnotationGroup(annotationGroup);     \n    });\n    this._annotationRender.setOnNoteOver((cmd) => {\n      this.tooltip.show(cmd.command, d3.event.pageX, d3.event.pageY);\n    });\n    this._annotationRender.setOnNoteOut(() => {\n      this.tooltip.hide();\n    });\n    this._annotationRender.setOnNoteClick((cmd) => {\n      this._commandList.scrollToCmd(cmd);\n    });\n\n    this._annotationRender.update(this.xScale);\n  }\n\n\n  _generateCommandsPerGroup() {\n    // put each cmd-groups into separate arrays:\n    const commandsPerGroup = new Array(this.countOfCmdGroups);\n    for (let i = 0; i < commandsPerGroup.length; i++) {\n      commandsPerGroup[i] = [];\n    }\n    this.commands.forEach( (cmd) => {\n      commandsPerGroup[cmd.vertOffsetGroup].push(cmd);\n    });\n    return commandsPerGroup;\n  }\n\n\n  _generateCommandGroupOffsets() {\n    const offsets = [];\n    // dont start directly on the x-axis, but a little higher\n    let currentOffset = _CmdRectHeights.VERY_MANY_MOD;\n    for (let i = 0; i < this.countOfCmdGroups; i++) {\n      offsets.push(currentOffset);\n      currentOffset += this.TOTAL_CMD_GROUP_HEIGHT;\n    }\n    return offsets;\n  }\n\n\n  _calcRectXPosition(cmd, xScale) {\n    let startX = xScale(cmd.startTime);\n    const w = xScale(cmd.endTime) - startX;\n    
if (w < this.CMD_MIN_WIDTH) {\n      // since a cmd has to have at least that width, but shall be\n      // centered anyway:\n      const center = startX + w / 2.0;\n      startX = center - this.CMD_MIN_WIDTH / 2.0;\n    }\n    return startX;\n  }\n\n\n  _calcRectWidth(cmd, xScale) {\n    const w = xScale(cmd.endTime) - xScale(cmd.startTime);\n    if (w < this.CMD_MIN_WIDTH) {\n      return this.CMD_MIN_WIDTH;\n    }\n    return w;\n  }\n\n\n  _handleZoom(transform) {\n    const xScaleNew = transform.rescaleX(this.xScale);\n\n    this.axisBottom.scale(xScaleNew);\n    this.xAxisDraw.call(\n      this.axisBottom\n      // .ticks(d3.timeWeek, 2)\n      // .tickFormat(d3.timeFormat('%b %d'))\n    );\n\n    this.commandRects\n      .attr('x', (cmd) => {\n        const pos = this._calcRectXPosition(cmd, xScaleNew);\n        // note: pos may be less than zero which is ok, because\n        // otherwise wide rects may disappear too soon.\n        return pos;\n      })\n      .attr('width', (cmd) => { return this._calcRectWidth(cmd, xScaleNew); });\n\n    this._annotationRender.update(xScaleNew); \n  }\n\n\n}\n\n// TODO: document it\nclass _CmdRectHeights {\n  static get NO_MOD() { return 7; }\n  static get FEW_MOD() { return 14; }\n  static get MANY_MOD() { return 20; }\n  static get VERY_MANY_MOD() { return 24; }\n}\n"
  },
  {
    "path": "html-export/src/conversions.js",
    "content": "\n/**\n * @return {String} human readble byte-size-string\n * @param {int} bytes \n * @param {boolean} si if true: use 1000 as base (kB), else 1024 (KiB)\n */\nexport function bytesToHuman(bytes, si = false) {\n  const thresh = si ? 1000 : 1024;\n  if (Math.abs(bytes) < thresh) {\n    return bytes + ' B';\n  }\n  const units = si ?\n   ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] :\n   ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'];\n  let u = -1;\n  do {\n    bytes /= thresh;\n    ++u;\n  } while (Math.abs(bytes) >= thresh && u < units.length - 1);\n  return bytes.toFixed(1) + ' ' + units[u];\n}\n"
  },
  {
    "path": "html-export/src/d3js_util.js",
    "content": "\n\n/**\n * Wrap long axis labels to mutliple lines by maximum width, splitting words by \n * the given *delimeter-keeping* splitStr and auto-truncating long words.\n * Leading and trailing whitespaces of each line are trimmed.\n * @param {[String]} tickTexts \n * @param {int} width The max. width in pixels for each label \n * @param {RegExp} splitStr A regular expression for the split-string, \n * which keeps the delimeter, e.g. /(?=\\s)/.\n */\nexport function wrapTextLabels(tickTexts, width, splitStr=/(?=\\s)/) {\n  tickTexts.each(function() {\n    const text = d3.select(this);\n    const words = text.text().split(splitStr);\n    let line = [];\n    let lineNumber = 0;\n    const lineHeight = 1.1; // ems\n    const y = text.attr(\"y\");\n    const dy = parseFloat(text.attr(\"dy\"));\n    let tspan = text.text(null).append(\"tspan\").attr(\"x\", 0).attr(\"y\", y)\n      .attr(\"dy\", dy + \"em\");\n    // only increment i, if the word can be for sure drawn to current line\n    for(let i=0; i < words.length; ) {\n      line.push(words[i]);\n      tspan.text(line.join(''));\n      if (tspan.node().getComputedTextLength() <= width) {\n        ++i;\n        continue;\n      }\n\n      if (line.length === 1) {\n        // this single word is too long to fit into a line -> clip it\n        _truncateAndSetLabelTxt(line[0].trim(), tspan, width);\n        ++i;    \n      } else {\n        // this word does not fit any more -> put it to next line \n        // and render all others\n        line.pop();\n        tspan.text(line.join('').trim());\n        \n        // NO ++i -> the current word must be rendered in next line\n      } \n      tspan = text.append(\"tspan\").attr(\"x\", 0).attr(\"y\", y)\n        .attr(\"dy\", `${++lineNumber * lineHeight + dy}em`).text(null);\n      line = [];\n    }\n  });\n}\n\n\nfunction _truncateAndSetLabelTxt(labelTxt, tspan, width) {\n  do {\n    labelTxt = labelTxt.slice(0, -3);\n    tspan.text(labelTxt);\n  } while 
(tspan.node().getComputedTextLength() > width);\n  labelTxt = labelTxt.slice(0, -2);\n  labelTxt += '..';\n  tspan.text(labelTxt);\n}\n"
  },
  {
    "path": "html-export/src/generic_text_dialog.js",
    "content": "\n\nexport default class GenericTextDialog {\n  constructor() {\n  }\n\n  show(title, content){\n    $(\"#genericModalTitle\").html(title);\n    $(\"#genericModalBody\").html(content);\n    $(\"#genericModal\").modal('toggle');\n  }\n}\n"
  },
  {
    "path": "html-export/src/globals.js",
    "content": "\n\nimport GenericTextDialog from './generic_text_dialog';\n\nexport let humanDateFormat;\nexport let humanDateFormatOnlyDate;\nexport let humanDateFormatOnlyTime;\n\nexport let d3TimeParseIsoWithMil;\n\nexport let textDialog;\n\nexport let commandList;\nexport let sessionTimeline;\n\nexport function init(){\n  humanDateFormat = d3.timeFormat(\"%Y-%m-%d %H:%M\");\n  humanDateFormatOnlyDate = d3.timeFormat(\"%Y-%m-%d\");\n  humanDateFormatOnlyTime = d3.timeFormat(\"%H:%M\");\n\n  d3TimeParseIsoWithMil = d3.timeParse(\"%Y-%m-%dT%H:%M:%S.%L\");\n\n  textDialog = new GenericTextDialog();\n}\n"
  },
  {
    "path": "html-export/src/html_util.js",
    "content": "\n\nexport function insertAfter(newNode, referenceNode) {\n  referenceNode.parentNode.insertBefore(newNode, referenceNode.nextSibling);\n}\n\n\n/**\n * Check if element is visible inside container - also partially at your wish.\n * @return {boolean}\n * @param {Element} element \n * @param {Element} container \n * @param {boolean} partial if true, return true, if not completely but partially\n * visible\n */\nexport function isScrolledIntoView(element, container, partial) {\n   // Get container properties\n   const cTop = container.scrollTop;\n   const cBottom = cTop + container.clientHeight;\n\n   // Get element properties\n   const eTop = element.offsetTop;\n   const eBottom = eTop + element.clientHeight;\n\n   // Check if in view    \n   const isTotal = (eTop >= cTop && eBottom <= cBottom);\n   const isPartial = partial && (\n     (eTop < cTop && eBottom > cTop) ||\n     (eBottom > cBottom && eTop < cBottom)\n   );\n\n   return (isTotal || isPartial);\n}\n"
  },
  {
    "path": "html-export/src/index.js",
    "content": "\nimport * as command_manipulation from './command_manipulation';\nimport CommandList from './command_list';\nimport {assert} from './util';\nimport * as globals from './globals';\nimport SessionTimeline from './session_timeline';\nimport * as stats from './stats';\n\nfunction displayErrorAtTop(msg){\n  // vanilla js, since loading of libraries might have failed\n  const errEl = document.getElementById('topError');\n  errEl.style[\"visibility\"] = \"visible\";\n  errEl.innerHTML = msg;\n}\n\n\nfunction main() {\n  if (scriptLoadError) {\n    console.log(scriptLoadError);\n    displayErrorAtTop(scriptLoadError);\n    return;\n  }\n\n  globals.init();\n\n  assert(commands.length > 0, 'commands.length > 0');\n\n  const queryDate = globals.d3TimeParseIsoWithMil(ORIGINAL_QUERY_DATE_STR);\n  const body = d3.select('body');\n\n  body.append('button')\n    .attr('class', 'btn btn-primary')\n    .style('position', 'absolute')\n    .style('right', '0px')\n    .style('top', '0px')\n    .html(\"Report Metadata\")\n    .on(\"click\", () => {\n      globals.textDialog.show(\"Report Metadata\", \n        `Commandline-query (executed on ` + \n        `${globals.humanDateFormat(queryDate)}): ${ORIGINAL_QUERY}`);\n    });\n\n  command_manipulation.prepareCommands(commands);\n\n  {\n    let lastStart = commands[0].startTime;\n    for(let i=1; i < commands.length; i++){\n      assert(commands[i].startTime >= lastStart);\n      lastStart = commands[i].startTime;\n    }\n  }\n  \n\n  const cmdFinalEndDate = globals.d3TimeParseIsoWithMil(CMD_FINAL_ENDDATE_STR);\n \n  // Do not change order -> commandList.size computed based on sessionTimeLine.size.\n  globals.sessionTimeline = new SessionTimeline(commands, cmdFinalEndDate);\n  globals.commandList = new CommandList(commands);\n  \n  d3.select('#initialSpinner').remove();\n  $(document).ready(stats.generateMiscStats);\n}\n\n\ntry {\n  main();\n} catch (error) {\n  console.log(error);\n  displayErrorAtTop(error);\n}\n"
  },
  {
    "path": "html-export/src/limited_queue.js",
    "content": "\n\nimport TinyQueue from 'tinyqueue';\n\n/**\n * Allow for a max. length of the queue.\n * Add further convenience functions\n */\nexport default class LimitedQueue extends TinyQueue {\n\n  setMaxLength(l){\n    this._maxLength = l;\n  }\n\n  /**\n   * @override\n   */\n  push(item) {\n    super.push(item);\n    if(this._maxLength !== undefined && this.length > this._maxLength){\n      this.pop();\n    }\n  }\n\n  popAll(){\n    const items = [];\n    while (this.length > 0) { \n      items.push(this.pop());\n    }\n    return items;\n  }\n}\n"
  },
  {
    "path": "html-export/src/map_extended.js",
    "content": "\n\nexport default class MapExtended extends Map {\n  \n  /**\n   * Like get() but insert and return a default, if the key\n   * does not exist\n   * @return {*} \n   * @param {*} key \n   * @param {Function} defaultFactory A parameterless function whose return value\n   * is used as default.\n   */\n  getDefault(key, defaultFactory) {\n    if(defaultFactory === undefined){\n      throw Error('defaultValue must not be undefined');\n    }\n    let val = this.get(key);\n    if(val === undefined){\n      val = defaultFactory();\n      this.set(key, val);\n    }\n    return val;\n  }\n\n}\n"
  },
  {
    "path": "html-export/src/plot_cmdcount_per_cwd.js",
    "content": "\n\nimport PlotSimpleBar from './plot_simple_bar';\n\n/**\n * A bar plot displaying the working directories\n * where the most commands were executed.\n */\nexport default class PlotCmdCountPerCwd extends PlotSimpleBar {\n\n  generatePlot(commands, siblingElement){\n    super.generatePlot(cwdCmdCounts, siblingElement);\n\n  }\n  /**\n   * @override\n   */\n  _chartTitle(){ return 'Working directories with most commands'; }\n\n\n  /**\n   * @override\n   */  \n  _yScaleBandDomain(){ return [0, cwdCmdCounts[0].countOfCommands]; }\n\n  /**\n   * @override\n   */  \n  _xValue(cwdCmdCount) {\n    return cwdCmdCount.workingDir;\n  }  \n\n  /**\n   * @override\n   */  \n  _yValue(cwdCmdCount) {\n    return cwdCmdCount.countOfCommands;\n  }  \n\n  /**\n   * @override\n   */  \n  _yAxisTicksFilter(tick){ return Number.isInteger(tick); }\n\n  /**\n   * @override\n   */  \n  _yAxisTickFormat() { return d3.format('d'); }\n\n\n  _xTxtLabelSplitStr() { return /(?=\\/)/; }\n\n}\n\n\n"
  },
  {
    "path": "html-export/src/plot_cmdcount_per_session.js",
    "content": "\n\nimport PlotSimpleBar from './plot_simple_bar';\nimport * as globals from './globals';\n\n/**\n * A bar plot displaying the sessions\n * wherein the most commands were executed.\n */\nexport default class PlotCmdCountPerSession extends PlotSimpleBar {\n\n  generatePlot(commands, siblingElement) {\n      this._sessionMostCmds = [];\n      sessionsMostCmds.forEach((e) => {\n        this._sessionMostCmds.push(\n          new _SessionMostCmdsEntry(commands[e.idxFirstCmd], e.countOfCommands)\n          );\n      });\n      this._maxCountOfCmdsInSession = this._sessionMostCmds[0].countOfCommands;\n    \n    // sort the sessions by start date\n    this._sessionMostCmds.sort((s1, s2) => {\n      return s1.firstCmd.startTime - s2.firstCmd.startTime;\n    });\n\n    super.generatePlot(this._sessionMostCmds, siblingElement);\n\n  }\n  /**\n   * @override\n   */\n  _chartTitle(){ return 'Sessions with most commands'; }\n\n\n  /**\n   * @override\n   */  \n  _yScaleBandDomain(){ return [0, this._maxCountOfCmdsInSession]; }\n\n  /**\n   * @override\n   */  \n  _xValue(session) {\n    return session.firstCmd.sessionUuid;\n  }  \n\n  /**\n   * @override\n   */  \n  _yValue(session) {\n    return session.countOfCommands;\n  }  \n\n  /**\n   * @override\n   */  \n  _yAxisTicksFilter(tick){ return Number.isInteger(tick); }\n\n  /**\n   * @override\n   */  \n  _yAxisTickFormat() { return d3.format('d'); }\n\n\n  /**\n   * @return {int}\n   * @param {[Command]} cmds1 \n   * @param {[Command]} cmds2 \n   */\n  _compareBySessionCmdCount(cmds1, cmds2) {\n    return cmds1.length - cmds2.length;\n  }\n\n  _barColor(session){\n    return session.firstCmd.sessionColor;\n  }\n\n  _modifyBars(bars){\n    bars\n      .style('cursor', 'pointer')\n      .on(\"click\", (session) => { \n        globals.commandList.scrollToCmd(session.firstCmd); \n      });\n  }\n\n  _modifyTickText(tickTxt, session) {\n    tickTxt\n      .style('cursor', 'pointer')\n      .on(\"click\", () => { \n 
       globals.commandList.scrollToCmd(session.firstCmd); \n      });\n  }\n\n}\n\n\nclass _SessionMostCmdsEntry {\n  constructor(firstCmd, countOfCommands){\n    this.firstCmd = firstCmd;\n    this.countOfCommands = countOfCommands;\n  }\n}\n\n\n"
  },
  {
    "path": "html-export/src/plot_io_per_dir.js",
    "content": "\n\nimport PlotSimpleBar from './plot_simple_bar';\n\n/**\n * A bar plot displaying directories\n * with most IO-activity.\n */\nexport default class PlotIoPerDir extends PlotSimpleBar {\n\n  generatePlot(commands, siblingElement){\n    super.generatePlot(dirIoCounts, siblingElement);\n\n  }\n  /**\n   * @override\n   */\n  _chartTitle(){ return 'Directories with most input-output-activity'; }\n\n\n  /**\n   * @override\n   */  \n  _yScaleBandDomain(){ return [0, dirIoCounts[0].readCount + dirIoCounts[0].writeCount]; }\n\n  /**\n   * @override\n   */  \n  _xValue(ioStat) {\n    return ioStat.dir;\n  }  \n\n  /**\n   * @override\n   */  \n  _yValue(ioStat) {\n    return ioStat.readCount + ioStat.writeCount;\n  }  \n\n  /**\n   * @override\n   */  \n  _yAxisTicksFilter(tick){ return Number.isInteger(tick); }\n\n  /**\n   * @override\n   */  \n  _yAxisTickFormat() { return d3.format('d'); }\n  \n  /**\n   * @override\n   */  \n  _xTxtLabelSplitStr() { return /(?=\\/)/; }\n}\n"
  },
  {
    "path": "html-export/src/plot_most_written_files.js",
    "content": "\nimport PlotSimpleBar from './plot_simple_bar';\n\nimport * as command_manipulation from './command_manipulation';\nimport * as globals from './globals';\n\n/**\n * A bar plot displaying the commands which\n * modified the most files.\n */\nexport default class PlotMostWrittenFiles extends PlotSimpleBar {\n\n  generatePlot(commands, siblingElement){\n    this._filteredCmds = [];\n    mostFileMods.forEach((e) => {\n      this._filteredCmds.push(commands[e.idx]);\n    });\n    this._maxCountOfWfileEvents = this._filteredCmds[0].fileWriteEvents_length;\n\n    // Be consistent with timeline and sort by date:\n    this._filteredCmds.sort(command_manipulation.compareStartDates);\n\n    super.generatePlot(this._filteredCmds, siblingElement);\n\n  }\n  /**\n   * @override\n   */\n  _chartTitle(){ return 'Commands with most file-modifications'; }\n\n  /**\n   * @override\n   */  \n  _yScaleBandDomain(){ return [0, this._maxCountOfWfileEvents]; }\n\n  /**\n   * @override\n   */  \n  _xValue(cmd) {\n    return globals.humanDateFormatOnlyDate(cmd.startTime) + \": \" +\n      cmd.command;\n  }  \n\n  /**\n   * @override\n   */  \n  _yValue(cmd) {\n    return cmd.fileWriteEvents_length;\n  }  \n\n  /**\n   * @override\n   */  \n  _yAxisTicksFilter(tick){ return Number.isInteger(tick); }\n\n  /**\n   * @override\n   */  \n  _yAxisTickFormat() { return d3.format('d'); }\n\n\n  _barColor(cmd){\n    return cmd.sessionColor;\n  }\n\n  _modifyBars(bars){\n    bars\n      .style('cursor', 'pointer')\n      .on(\"click\", (cmd) => { \n        globals.commandList.scrollToCmd(cmd); \n      });\n  }\n\n  _modifyTickText(tickTxt, cmd) {\n    tickTxt\n      .style('cursor', 'pointer')\n      .on(\"click\", () => { \n        globals.commandList.scrollToCmd(cmd); \n      });\n  }\n}\n\n\n"
  },
  {
    "path": "html-export/src/plot_simple_bar.js",
    "content": "\n\nimport * as d3js_util from './d3js_util';\nimport {ErrorNotImplemented} from './util';\n\n/**\n * Base class for several bar plots\n */\nexport default class PlotSimpleBar {\n  constructor() {\n    this._margin = { top: 20, right: 20, bottom: 60, left: 40 };\n    this._width = 500 - this._margin.left - this._margin.right;\n    this._height = 300 - this._margin.top - this._margin.bottom;\n\n    this._maxBarWidth = 30;\n  }\n\n  generatePlot(data, siblingElement) {\n    const plotContainer = siblingElement.append('div')\n      .style('position', 'relative')\n      .style('padding', '12px')\n      .style('display', 'inherit');\n      \n\n    this._svg = plotContainer.append(\"svg\")\n      .attr(\"width\", this._width + this._margin.left + this._margin.right)\n      .attr(\"height\", this._height + this._margin.top + this._margin.bottom)\n      .append(\"g\")\n      .attr(\"transform\",\n        \"translate(\" + this._margin.left + \",\" + this._margin.top + \")\");\n\n    // chart title\n    const chartTitle = this._svg.append(\"text\")\n      .attr(\"x\", (this._width / 2.0))\n      .attr(\"y\", -3)\n      .attr(\"text-anchor\", \"middle\")\n      .style(\"font-size\", \"16px\")\n      .style(\"text-decoration\", \"underline\")\n      .text(this._chartTitle());\n\n    this._xScaleBand = d3.scaleBand()\n      .range([0, this._width])\n      .padding(0.1);\n    this._yScaleBand = d3.scaleLinear()\n      // leave some space for the char title:\n      .range([this._height, chartTitle.node().getBoundingClientRect().height * 1.2]);\n\n    // In case of duplicate x-axis label values they are overridden, which should\n    // never be desired. Instead build a range and access the respective data-array-element\n    // by index.\n    this._xScaleBand.domain(d3.range(data.length));\n    this._yScaleBand.domain(this._yScaleBandDomain());\n\n    const actualBandWidth = (this._xScaleBand.bandwidth() > this._maxBarWidth) ? 
\n      this._maxBarWidth :\n      this._xScaleBand.bandwidth();\n\n\n    // append the rectangles for the bar chart\n\n    const dataEnterSelection = this._svg.selectAll(\".bar\").data(data).enter();\n\n    const bars = dataEnterSelection.append(\"rect\")\n      .style('fill',(d, i) => { return this._barColor(d); })\n      .attr(\"x\", (d, i) => { \n        let x = this._xScaleBand(i);\n        const center = x + this._xScaleBand.bandwidth()/2.0;\n        x = center - actualBandWidth/2.0;\n        return x;\n       })\n      .attr(\"width\", actualBandWidth) \n      .attr(\"y\", (d) => { return this._yScaleBand(this._yValue(d)); })\n      .attr(\"height\", (d) => { return this._height - this._yScaleBand(this._yValue(d)); })\n      .attr('data-toggle', 'tooltip')\n      .attr('title', (d) => { return this._barTooltipTxt(d); });\n  \n    this._modifyBars(bars);\n      \n    // add the x Axis\n    this._svg.append(\"g\")\n      .attr(\"transform\", \"translate(0,\" + this._height + \")\")\n      .call(d3.axisBottom(this._xScaleBand).tickFormat((d,i)=> this._xValue(data[i])))\n      .selectAll(\"text\")\n      .call((tickTexts) => {\n        const thisPlot = this;\n        tickTexts.each(function (plainTxt, idx) {\n          const text = d3.select(this);\n          text.attr(\"title\", function () {\n            return thisPlot._xAxisTooltipTxt.call(thisPlot, data[idx]);\n          }).attr('data-toggle', 'tooltip')\n            .attr('data-placement', 'left');\n          thisPlot._modifyTickText(text, data[idx]);  \n        });\n\n        d3js_util.wrapTextLabels(tickTexts, \n          this._xScaleBand.bandwidth(), \n          this._xTxtLabelSplitStr());  \n      });\n\n    // add the y Axis\n    const yAxisTicks = this._yScaleBand.ticks()\n      .filter((tick) => { return this._yAxisTicksFilter(tick); });\n    this._yaxis = d3.axisLeft(this._yScaleBand);\n    const yTickFormat = this._yAxisTickFormat();\n    if(yTickFormat !== undefined){\n      
this._yaxis.tickValues(yAxisTicks).tickFormat( yTickFormat );\n    }\n\n    this._svg.append(\"g\").call(this._yaxis);      \n  }\n\n  \n  // MUST override methods\n  _chartTitle(){ throw new ErrorNotImplemented(); }\n  _yScaleBandDomain(){ throw new ErrorNotImplemented(); }\n  // Is called for each x-value\n  _xValue(d){ throw new ErrorNotImplemented(); }\n  // Is called for each y-value\n  _yValue(d){ throw new ErrorNotImplemented(); }\n\n  // MAY override methods\n  _yAxisTicksFilter(tick){ return true; }\n  _yAxisTickFormat() { return undefined; }\n  _modifyTickText(tickTxt, data) {}\n\n  _xTxtLabelSplitStr() { return /(?=\\s)/; }  \n  _barTooltipTxt(dataElement){\n    return this._xValue(dataElement);\n  }\n  _xAxisTooltipTxt(dataElement){\n    return this._xValue(dataElement);\n  }\n  _barColor(dataElement){\n    return 'steelblue';\n  }\n  // apply further modifications to the bars\n  _modifyBars(bars){}\n\n}\n\n"
  },
  {
    "path": "html-export/src/session_timeline.js",
    "content": "import * as util from './util';\nimport MapExtended from './map_extended';\nimport TimelineGroupFind from './timeline_group_find';\nimport AnnotationLineRender from './annotation_line_render';\nimport ZoomButtons from './zoom_buttons';\nimport * as globals from './globals';\n\nexport default class SessionTimeline {\n  constructor(commands, cmdFinalEndDate) {\n    this.cmdFinalEndDate = cmdFinalEndDate;\n\n    this._margin = {\n      top: 20,\n      right: 20,\n      bottom: 24,\n      left: 24,\n    };\n\n    // get the width in pixel of a character\n    this.annotationCharWidth = d3.select(\"#annotation_text_char\").node()\n      .getBoundingClientRect().width;\n    this.annotationCharHeight = d3.select(\"#annotation_text_char\").node()\n      .getBoundingClientRect().height;\n\n    // height of a session with no forks (parallel commands )\n    this.sessionBaseHeight = this.annotationCharHeight / 1.5;\n    this.sessionPadding = this.annotationCharHeight / 5;\n    // choose less than two, so two parallel commands\n    // are already wider than a lonely command.\n    this.sessionMinHeight = this.sessionBaseHeight / 1.5;\n\n    // An annotation shall only be displayed, if its minimum width in pixel\n    // is at least 5 character. Warning: do not set < 1 -> text rendering issues for annotations\n    this.annotationMinWidth = this.annotationCharWidth * 5;\n    // distance to the belonging command rect\n    this.annotationDistance = this.annotationCharHeight / 3.0;\n    this.commandRects = [];\n\n    \n    // minimum width of a cmd-rect. 
Let it be at least 1, otherwise very short commands\n    // are barely visible (get another color...)\n    this.CMD_MIN_WIDTH = 4;\n\n    this.svgWidth = util.windowWidth() - this._margin.left - this._margin.right - 30;\n\n    const plotContainer = d3.select('body').append('div')\n      .style('position', 'relative'); // see https://stackoverflow.com/a/10487329\n\n    this.svg = plotContainer.append('svg');\n    this._annotationRender = new AnnotationLineRender(this.svg);\n\n    const groupedSessions = this._generateCommandsPerSession(commands);\n    this.svgHeight = Math.max(100, this._prerenderSessions(groupedSessions));\n\n    this.xScale = d3.scaleTime()\n      .range([0, this.svgWidth]);\n\n    this._yScale = d3.scaleLinear()\n      .range([this.svgHeight, 0]);\n    this._yScale.domain([0, this.svgHeight]);\n\n    this.axisBottom = d3.axisBottom(this.xScale);\n\n    this.svg.attr('width', this._margin.left + this.svgWidth + this._margin.right)\n    .attr('height', this._margin.top + this.svgHeight + this._margin.bottom)\n    .append('g')\n    .attr('transform', 'translate(' + this._margin.left + ',' + this._margin.top + ')')\n    .style('z-index', -1);\n\n    const listenerRect = this.svg\n      .append('rect')\n      .attr('class', 'listener-rect')\n      .attr('x', 0)\n      .attr('y', -this._margin.top)\n      .attr('width', this._margin.left + this.svgWidth + this._margin.right)\n      .attr('height', this._margin.top + this.svgHeight + this._margin.bottom)\n      .style('opacity', 0);\n\n\n    this.xScale.domain([\n      // the commands are sorted by starttime...\n      commands[0].startTime,\n      this.cmdFinalEndDate,\n    ]).nice();\n\n    // draw axes\n    this.xAxisDraw = this.svg.insert('g', ':first-child')\n      .attr('class', 'x axis')\n      .attr('transform', 'translate(0,' + this.svgHeight + ')')\n      .call(this.axisBottom\n        // .ticks(d3.timeWeek, 2)\n        // .tickFormat(d3.timeFormat('%b %d'))\n      );\n\n    const _drawSession 
= (session, idx, lineIdx) => {\n      // draw rectangles\n      const className = 'sessionTimeSeries' + \n        session.getSessionGroup() + idx;\n      this.commandRects.push(this.svg.selectAll('.' + className)\n        .data(session.getCmdsWithMeta())\n        .enter()\n        .append('rect')\n        .attr('class', className)\n        .attr('x', (cmdWithMeta) => { \n          return this._calcRectXPosition(cmdWithMeta.cmd, this.xScale); \n        })\n        .attr('y', (cmdWithMeta) => { \n          // rects are drawn from top to bottom, so add the height:\n          return this._yScale(cmdWithMeta.getY() + cmdWithMeta.getHeight()); \n        })\n        .attr('width', (cmdWithMeta) => { \n          return this._calcRectWidth(cmdWithMeta.cmd, this.xScale); \n        })\n        .attr('height', (cmdWithMeta) => { \n          return cmdWithMeta.getHeight(); \n        })\n        .attr('fill', (cmdWithMeta) => { \n          // TODO: rather determine the session color in this class\n          // on a per line-basis, so the same color appears as seldom\n          // as possible in a given line (?).\n          // But what about the colors in the cmd-list?...\n          return cmdWithMeta.cmd.sessionColor; \n        } )\n        .style('cursor', 'pointer')\n        .attr('title', (cmdWithMeta) => { return cmdWithMeta.cmd.command; })\n        .on(\"click\", (cmdWithMeta) => { \n          globals.commandList.scrollToCmd(cmdWithMeta.cmd); \n        })\n        );\n      $('.' 
+ className).tooltip({\n        delay: { show: 50, hide: 0 },\n      });\n\n    }; \n\n    groupedSessions.forEach((sessionLine, lineIdx) => {\n      sessionLine.forEach((session, sessionIdx) => {\n        _drawSession(session, sessionIdx, lineIdx);\n      });\n    });   \n\n\n    this._preRenderAnnotations(groupedSessions);\n    this._annotationRender.setOnNoteClick((cmdWithMeta) => {\n      globals.commandList.scrollToCmd(cmdWithMeta.cmd);\n    });\n    this._annotationRender.update(this.xScale);\n        \n\n    const minTimeMilli = 20000; // do not allow zooming beyond displaying 20 seconds\n    const maxTimeMilli = 6.3072e+11; // approx 20 years\n\n    const currentWidthMilli = cmdFinalEndDate - commands[0].startTime;\n\n    const minScaleFactor = currentWidthMilli / maxTimeMilli;\n    const maxScaleFactor = currentWidthMilli / minTimeMilli;\n\n    const zoom = d3.zoom()\n      // .scaleExtent([0.001, 5000])\n      .scaleExtent([minScaleFactor, maxScaleFactor])\n      .on(\"zoom\", () => {\n        this._handleZoom(d3.event.transform);\n      });\n\n    this._zoomButtons = new ZoomButtons(plotContainer, listenerRect, zoom);\n\n    listenerRect.call(zoom);\n  }\n\n  getSvg(){\n    return this.svg;\n  }\n\n  _generateCommandsPerSession(commands) {\n    const assignParallelCmdCounts = (commandsPerSession) => {\n      // find out the number of parallel commands in each session and store it \n      // in the meta-info of each cmd. The groups are already assigned, one command\n      // is parallel to another, if there exists at least one command\n      // between two zero-group-commands. 
Note that the groups of\n      // those in-between-commands may rise and fall arbitrarily often,\n      // so keep track of the max.\n      commandsPerSession.forEach((session) => {\n        // index in the sessions cmd-array, where the last group 0 was seen\n        let lastZeroGroupIdx = 0;\n        let lastHighestGroup = 0;\n        // yes, <= to simplify handling the final command\n        for (let i = 1; i <= session.getCmdsWithMeta().length; i++) {\n          if (i >= session.getCmdsWithMeta().length || \n              session.getCmdsWithMeta()[i].getGroup() === 0) {\n            // a new group has started or we are at end. Assign the found number of parallel\n            // commands to all affected commands:\n            const countOfParallelCmds = lastHighestGroup + 1; // zero based..\n\n            for (let j = lastZeroGroupIdx; j < i; j++) {\n              session.getCmdsWithMeta()[j].setCountOfParallelGroups(countOfParallelCmds);\n            }\n            // also keep track of the max number of parallel commands in this session\n            // for later use\n            session.setMaxCountOfParallelCommands(\n              Math.max(session.getMaxCountOfParallelCommands(), countOfParallelCmds)\n            );\n\n            lastZeroGroupIdx = i;\n            lastHighestGroup = 0;\n          } else {\n            // keep track of the highest group\n            lastHighestGroup = Math.max(lastHighestGroup, \n              session.getCmdsWithMeta()[i].getGroup());\n          }\n        }\n      });\n    };\n\n    const commandsPerSession = new MapExtended();\n\n    commands.forEach( (cmd) => {\n      // note: Map()' iteration order is the insert order, which is\n      // desired here -> since the command-array is ordered by startDateTime,\n      // the generated session map is also ordered by startDateTime\n      const session = commandsPerSession.getDefault(cmd.sessionUuid, \n        () => { return new _Session(); });\n      session.addCmd(cmd);\n    
});\n\n    assignParallelCmdCounts(commandsPerSession);\n\n    // assign a group to each session\n    const sessionGrpFind = new TimelineGroupFind();\n    let maxGroup = 0;\n    commandsPerSession.forEach((session) => {\n      const group = sessionGrpFind.findNextFreeGroup(session.getSessionStartDate(),\n        session.getSessionEndDate());\n\n      session.setSessionGroup(group);\n      maxGroup = Math.max(maxGroup, group);\n    });\n\n    // generate an array of an array of sessions, so all sessions which have\n    // the same group are in one array (in correct order).\n    // That way one 'line' of sessions can be \n    // drawn easily.\n    const groupedSessions = new Array(maxGroup + 1);\n    for (let i = 0; i < groupedSessions.length; i++) {\n      groupedSessions[i] = [];\n    }\n    commandsPerSession.forEach( (session) => {\n      groupedSessions[session.getSessionGroup()].push(session);\n    });\n\n    return groupedSessions;\n  }\n\n\n  /**\n   * @return {int} max y offset of the plot\n   * @param {*} groupedSessions \n   */\n  _prerenderSessions(groupedSessions){\n    const ANNOTATION_AND_PADDING = this.annotationDistance + \n      this.annotationCharHeight * 1.5; // * 1.5 -> give some more space\n\n    const _prerenderCmd = (cmdWithMeta, currentOffset, sessionHeight) => {\n      if(cmdWithMeta.getCountOfParallelGroups() === 1){\n        // non-parallel commands are aligned to session center:\n        cmdWithMeta.setHeight(this.sessionBaseHeight);\n        const y = currentOffset + sessionHeight/2 - this.sessionBaseHeight/2;\n        cmdWithMeta.setY(y);\n        return;\n      }\n      // parallel commands expand in equal parts over the whole sessionHeight \n      // (separated by padding)\n      let cmdHeight = sessionHeight / cmdWithMeta.getCountOfParallelGroups();\n      if(cmdHeight < this.sessionMinHeight){\n        cmdHeight = this.sessionMinHeight;\n      } else {\n        cmdHeight -= this.sessionPadding;\n      }\n      
cmdWithMeta.setHeight(cmdHeight);\n      const y = currentOffset + (cmdHeight + this.sessionPadding) * cmdWithMeta.getGroup();\n      cmdWithMeta.setY(y);\n    };\n\n    let currentOffset = 0;\n    groupedSessions.forEach((sessionLine, lineIdx) => {\n      // find the max. number of parallel commands in all sessions of the current line:\n      const maxNumberOfParallelCmds = sessionLine.reduce((prev, curr) => {\n        return prev.getMaxCountOfParallelCommands() > curr.getMaxCountOfParallelCommands() ?\n         prev : curr;\n      }).getMaxCountOfParallelCommands();    \n      const sessionHeight = maxNumberOfParallelCmds === 1 ?\n       this.sessionBaseHeight :\n       (this.sessionMinHeight + this.sessionPadding) * maxNumberOfParallelCmds;\n\n      sessionLine.forEach((session) => {\n        session.getCmdsWithMeta().forEach((cmdWithMeta) => {\n          _prerenderCmd(cmdWithMeta, currentOffset, sessionHeight);\n        });\n        session.setHeight(sessionHeight);\n        session.setY(currentOffset);\n      });\n      currentOffset += sessionHeight + ANNOTATION_AND_PADDING;\n    });    \n    return currentOffset;    \n  }\n\n  _preRenderAnnotations(groupedSessions){\n    groupedSessions.forEach((sessionLine) => { \n      const annotationGroup = [];\n      sessionLine.forEach((session) => {\n        session.getCmdsWithMeta().forEach((cmdWithMeta) => {\n          // only create annotations for the topmost commandgroup \n          // (in case of parallel commands)\n          if(cmdWithMeta.getCountOfParallelGroups() === cmdWithMeta.getGroup() + 1){\n            annotationGroup.push(this._createAnnotation(cmdWithMeta, \n              session.getY() + session.getHeight() + this.annotationDistance ));\n          }\n        });\n      });\n      this._annotationRender.addAnnotationGroup(annotationGroup);\n    });    \n  }\n  \n\n  _createAnnotation(cmdWithMeta, y){\n    return {\n      data: cmdWithMeta,\n      note: {\n        align: \"left\", \n        wrap: 
'nowrap',\n        // title: \"Annotation title\"\n      },\n      dx: 0,\n      ny: this.svgHeight - y,\n      y: this.svgHeight - (cmdWithMeta.getY() + cmdWithMeta.getHeight()),\n      startX: cmdWithMeta.cmd.startTime,\n      endX: cmdWithMeta.cmd.endTime,\n      fulltext: cmdWithMeta.cmd.command,\n    };\n  }\n\n\n  _calcRectXPosition(cmd, xScale) {\n    let startX = xScale(cmd.startTime);\n    const w = xScale(cmd.endTime) - startX;\n    if (w < this.CMD_MIN_WIDTH) {\n      // since a cmd has to have at least that width, but shall be\n      // centered anyway:\n      const center = startX + w / 2.0;\n      startX = center - this.CMD_MIN_WIDTH / 2.0;\n    }\n    return startX;\n  }\n\n\n  _calcRectWidth(cmd, xScale) {\n    const w = xScale(cmd.endTime) - xScale(cmd.startTime);\n    if (w < this.CMD_MIN_WIDTH) {\n      return this.CMD_MIN_WIDTH;\n    }\n    return w;\n  }\n\n\n  _handleZoom(transform) {\n    const xScaleNew = transform.rescaleX(this.xScale);\n\n    this.axisBottom.scale(xScaleNew);\n    this.xAxisDraw.call(\n      this.axisBottom\n      // .ticks(d3.timeWeek, 2)\n      // .tickFormat(d3.timeFormat('%b %d'))\n    );\n    // maybe_todo: execute in parallel...\n    this.commandRects.forEach((rectGroup) => {\n      rectGroup.attr('x', (cmdWithMeta) => {\n        const pos = this._calcRectXPosition(cmdWithMeta.cmd, xScaleNew);\n        // note: pos may be less than zero which is ok, because\n        // otherwise wide rects may disappear too soon.\n        return pos;\n        })\n        .attr('width', (cmdWithMeta) => {\n          return this._calcRectWidth(cmdWithMeta.cmd, xScaleNew);\n        });\n    });\n\n    this._annotationRender.update(xScaleNew);\n  }\n}\n\n\nclass _CommandWithMeta{\n  /**\n   * \n   * @param {Command} cmd \n   * @param {int} group the group assigned within a session\n   */\n  constructor(cmd, group){\n    this.cmd = cmd;\n    this._group = group;\n    this._countOfParallelGroups = -1;\n    this._height = 1000;\n    this._y 
= 0;\n    this._annotation = null;\n  }\n\n  getGroup(){\n    return this._group;\n  }\n\n  setCountOfParallelGroups(val){\n    this._countOfParallelGroups = val;\n  }\n\n  getCountOfParallelGroups(){\n    return this._countOfParallelGroups;\n  }\n\n  setHeight(val){\n    this._height = val;\n  }\n\n  getHeight(){\n    return this._height;\n  }\n\n  setY(val){\n    this._y = val;\n  }\n\n  getY(){\n    return this._y;\n  }\n\n  setAnnotation(val){\n    this._annotation = val;\n  }\n\n  getAnnotation(){\n    return this._annotation;\n  }\n\n}\n\nclass _Session {\n  constructor() {\n    this._cmdsWithMeta = [];\n    this._finalCmdEndDate = util.DATE_MIN;\n    this._groupFind = new TimelineGroupFind();\n    this._firstCmdStartDate = null;\n    this._sessionGroup = null;\n    this._maxCountOfParallelCmds = null;\n    this._height = null;\n  }\n\n  /**\n   * The passed commands *must* be sorted (asc) by startTime during\n   * subsequent calls of this method.\n   * @param {Command} cmd \n   */\n  addCmd(cmd) {\n    if(this._firstCmdStartDate === null){\n      // commands are sorted by startTime and we are called the first time.\n      this._firstCmdStartDate = cmd.startTime;\n    }\n    // commands are sorted by startTime but the first executed cmd may well finish\n    // last, so incrementally find the final endDate.\n    this._finalCmdEndDate = util.date_max(cmd.endTime, this._finalCmdEndDate);\n\n    const group = this._groupFind.findNextFreeGroup(cmd.startTime, cmd.endTime);\n    this._cmdsWithMeta.push(new _CommandWithMeta(cmd, group));\n\n  }\n\n  setMaxCountOfParallelCommands(val){\n    this._maxCountOfParallelCmds = val;\n  }\n\n  getMaxCountOfParallelCommands(){\n    return this._maxCountOfParallelCmds;\n  }\n\n\n  getSessionStartDate(){\n    return this._firstCmdStartDate;\n  }\n\n  getSessionEndDate(){\n    return this._finalCmdEndDate;\n  }\n\n  setSessionGroup(val){\n    this._sessionGroup = val;\n  }\n\n  getSessionGroup(){\n    return this._sessionGroup;\n 
 }\n\n  getCmdsWithMeta(){\n    return this._cmdsWithMeta;\n  }\n\n  setHeight(val){\n    this._height = val;\n  }\n\n  getHeight(){\n    return this._height;\n  }\n\n  setY(val){\n    this._y = val;\n  }\n\n  getY(){\n    return this._y;\n  }\n\n\n}\n"
  },
  {
    "path": "html-export/src/stats.js",
    "content": "\nimport PlotMostWrittenFiles from './plot_most_written_files';\nimport PlotCmdCountPerCwd from './plot_cmdcount_per_cwd';\nimport PlotIoPerDir from './plot_io_per_dir';\nimport PlotCmdCountPerSession from './plot_cmdcount_per_session';\nimport { timedForEach } from './util';\n\n\nexport async function generateMiscStats() {\n  const body = d3.select('body');\n\n  if (typeof commands[0].fileWriteEvents === 'undefined') {\n    // when generating from shournal, command-data (like fileWriteEvents)\n    // is loaded later for performance reasons\n    await timedForEach(commands, (cmd, idx) => {\n      const cmdDataTag = d3.select('#commandDataJSON' + idx);\n      const cmdData = JSON.parse(cmdDataTag.html());\n      Object.assign(cmd, cmdData);\n      cmdDataTag.remove();\n    });\n  }\n\n  if (mostFileMods.length === 0 && sessionsMostCmds.length === 0 && \n      cwdCmdCounts.length === 0 && dirIoCounts.length === 0) {\n    // No stats to display...\n    return;\n  }\n\n  body.append('h3')\n    .html('Miscellaneous statistics')\n    .style('padding-top', '1em');\n\n  const miscStatElement = body.append('div')\n    .style('padding-top', '20px')\n    .style('display', 'inline-block');\n\n  if (mostFileMods.length > 0) {\n    const plotMostWrittenFiles = new PlotMostWrittenFiles();\n    plotMostWrittenFiles.generatePlot(commands, miscStatElement);\n  }  \n  \n  if (sessionsMostCmds.length > 0) {\n    const plotCmdCountPerSession = new PlotCmdCountPerSession();\n    plotCmdCountPerSession.generatePlot(commands, miscStatElement);\n  }\n\n  if(cwdCmdCounts.length > 0){\n    const plotCmdCountPerCwd = new PlotCmdCountPerCwd();\n    plotCmdCountPerCwd.generatePlot(commands, miscStatElement);\n  }\n \n  if (dirIoCounts.length > 0) {\n    const plotIoPerDir = new PlotIoPerDir();\n    plotIoPerDir.generatePlot(commands, miscStatElement);\n  }\n\n  $('[data-toggle=\"tooltip\"]').tooltip({\n    delay: { show: 300, hide: 0 },\n  });\n}\n"
  },
  {
    "path": "html-export/src/timeline_group_find.js",
    "content": "\n\nimport TinyQueue from 'tinyqueue';\n\n/**\n * Find \"groups\" in an ordered timeline, so that parallel \n * events get different (low) groups (integers starting from zero). \n * Events are defined by start- and end-date. The container, for\n * whose elements findNextFreeGroup may be called subsequentially,\n * must be ordered by start-date.\n */\nexport default class TimelineGroupFind {\n\n  constructor(){\n    this._lastEndDates = [];\n    this._freeGroups = new TinyQueue();\n  }\n\n  /**\n   * @return {int} lowest free group, starting from 0.\n   * @param {Date} startDate start date of the next time element \n   * @param {Date} endDate end date of the next time element\n   */\n  findNextFreeGroup(startDate, endDate){\n    for (let i = this._lastEndDates.length - 1; i >= 0; i--) {\n      if (startDate > this._lastEndDates[i].endTime) {\n        this._freeGroups.push(this._lastEndDates[i].group);\n        this._lastEndDates.splice(i, 1);\n      }\n    }\n    // if we have free groups (from previous runs) use the lowest free group, \n    // else add a new one\n    const group = (this._freeGroups.length > 0) ? this._freeGroups.pop() : \n      this._lastEndDates.length;\n    this._lastEndDates.push(new _LastEndDateGroup(group, endDate));\n    return group;\n  }\n}\n\n\nclass _LastEndDateGroup {\n  constructor(group, endTime){\n    this.group = group;\n    this.endTime = endTime;\n  }\n}\n"
  },
  {
    "path": "html-export/src/tooltip.js",
    "content": "\n\nexport default class Tooltip {\n\n  constructor(){\n    this._tooltipDiv = d3.select('body')\n    .append('div')\n    .style(\"position\", \"absolute\")\n    .style(\"visibility\", 'hidden')\n    .style(\"background-color\", \"white\")\n    .style(\"border\", \"solid\")\n    .style(\"border-width\", \"2px\")\n    .style(\"border-radius\", \"5px\")\n    .style(\"padding\", \"5px\")\n    .style(\"z-index\", \"1000\")\n    .style(\"pointer-events\", \"none\"); // no flickering in chromium...\n  }\n\n  show(txt, x, y) {\n    // maybe_todo: if tooltip is too much on the right, it gets clipped. Maybe use solution from\n    // https://stackoverflow.com/a/51066294/7015849\n    this._tooltipDiv\n      .style(\"left\", x + \"px\")\n      .style(\"top\", y + \"px\")\n      .style('visibility', 'visible')\n      .html(txt);\n  }\n\n  hide() {\n    this._tooltipDiv.style('visibility', 'hidden');\n  }\n\n}\n"
  },
  {
    "path": "html-export/src/util.js",
    "content": "\nexport class ErrorNotImplemented extends Error { \n  constructor() {\n    super('Required method not implemented');\n  }\n}\n\nexport function sleep(ms) {\n  return new Promise((resolve) => setTimeout(resolve, ms));\n}\n\nexport function getTime() {\n  return new Date().getTime();\n}\n\nexport function date_max(d1, d2){\n  return d1 > d2 ? d1 : d2;\n}\n\nexport function date_min(d1, d2){\n  return d1 < d2 ? d1 : d2;\n}\n\nexport function windowWidth() {\n  return window.innerWidth ||\n    document.documentElement.clientWidth ||\n    document.body.clientWidth;\n}\n\n\nexport function windowHeight() {\n  return window.innerHeight ||\n    document.documentElement.clientHeight ||\n    document.body.clientHeight;\n}\n\n\nexport function assert(condition, message) {\n  if (!condition){\n    throw Error('Assert failed: ' + (message || ''));\n  }\n}\n\nexport const DATE_MIN = new Date(-8640000000000000);\n\n/**\n * non-blocking .foreach array loop.\n * @param {*} array \n * @param {*} func \n */\nexport async function timedForEach(array, func) {\n  const maxTimePerChunk = 200; // max 200ms until next sleep\n  function getTime() {\n    return new Date().getTime();\n  }\n  \n  let lastStart = getTime();\n  for (let i=0; i < array.length; i++) {\n    func(array[i], i, array); \n    const now = getTime();\n    if(now - lastStart > maxTimePerChunk){\n      // enough computation time used\n      await sleep(5);\n      lastStart = now;\n    }\n  }\n}\n\n\n/**\n * Binary search.\n * @param {[]} ar sorted array, may contain duplicate elements.\n * If there are more than one equal elements in the array,\n * the returned value can be the index of any one of the equal elements.\n * @param {*} el element to search for\n * @param {function}  compareFn  A comparator function. 
The function takes two arguments: (a, b) and returns:\n *        a negative number  if a is less than b;\n *        0 if a is equal to b;\n *        a positive number if a is greater than b.\n * @param {boolean} clipIdx see @return: \n * @return {int} if clipIdx is false: index of the element in a sorted array or (-n-1) where n\n * is the insertion point for the new element. \n * If clipIdx is true: return an index within the array element bounds, independent of\n * whether the element exists or not (the best matching existing index is returned).\n */\nexport function binarySearch(ar, el, compareFn, clipIdx=false) {\n  const clipIdxIfOn = (idx) => {\n    if(! clipIdx){\n      return idx;\n    }\n    if (idx < 0) {\n      idx = -(idx + 1);\n    }\n    if (idx >= ar.length) {\n      return ar.length - 1;\n    }\n    return idx;\n  };\n  \n  let m = 0;\n  let n = ar.length - 1;\n  while (m <= n) {\n    const k = (n + m) >> 1;\n    const cmp = compareFn(el, ar[k]);\n    if (cmp > 0) {\n      m = k + 1;\n    } else if(cmp < 0) {\n      n = k - 1;\n    } else {\n      return clipIdxIfOn(k);\n    }\n  }\n  return clipIdxIfOn(-m - 1);\n}\n\n/**\n * Get the directory of a unix path, e.g. the path /home/user/foo\n * would return /home/user.\n * @return {String}\n * @param {String} path \n */\nexport function getDirFromAbsPath(path){\n  return path.substring(0,path.lastIndexOf(\"/\"));\n}\n\n\n"
  },
  {
    "path": "html-export/src/zoom_buttons.js",
    "content": "\n\nexport default class ZoomButtons {\n  \n  /**\n   * @param {d3-element} containerDiv The plot/svg is excepted to be in that div. \n   * Its 'position' should be 'relative', see https://stackoverflow.com/a/10487329\n   * so we can place the buttons in an absolute manner.\n   * @param {d3-element} zoomArea the element used for zooming\n   * @param {d3.zoom} d3Zoom \n   */\n  constructor(containerDiv, zoomArea, d3Zoom) {\n    const btnGroup = containerDiv.append('div');\n\n    const zoomInBtn = this._appendZoomButton(btnGroup, '+')\n      .on(\"click\", () => {\n        d3Zoom.scaleBy(zoomArea.transition().duration(10), 1.2);\n      });\n    const zoomInBtnWidth = parseInt(zoomInBtn.style('width'), 10);\n\n    const zoomOutBtn = this._appendZoomButton(btnGroup, '-')\n      .on(\"click\", () => {\n        d3Zoom.scaleBy(zoomArea.transition().duration(10), 0.8);\n      });\n    const zoomOutBtnWidth = parseInt(zoomOutBtn.style('width'), 10);\n\n    const zoomResetBtn = this._appendZoomButton(btnGroup, '[ ]')\n      .on(\"click\", () => {\n        d3Zoom.transform(zoomArea, d3.zoomIdentity.translate(0, 0).scale(1.0));\n      });\n    const zoomResetBtnWidth = parseInt(zoomResetBtn.style('width'), 10);\n\n    const zoomButtonsWidth = zoomInBtnWidth + zoomOutBtnWidth + zoomResetBtnWidth;\n\n    btnGroup.style('position', 'absolute') // see https://stackoverflow.com/a/10487329 -> \n                                           // parent position should be relative\n      .style('top', 0 + 'px')\n      .style('right', ( zoomButtonsWidth) + 'px');\n      \n  }\n\n  _appendZoomButton(container, text) {\n    return container.append('button')\n      .attr('class', 'zoomButton')\n      .html(text);\n  }\n}\n"
  },
  {
    "path": "html-export/webpack.config.js",
    "content": "const LicenseWebpackPlugin = require('license-webpack-plugin').LicenseWebpackPlugin;\n\nmodule.exports = {\n  optimization:{\n      minimize: false, // maybe_todo: set to true for release\n  },\n  plugins: [\n    new LicenseWebpackPlugin()\n  ],\n  mode:'production',\n}\n"
  },
  {
    "path": "install/90-shournaladd.rules.in",
    "content": "ACTION==\"add\", KERNEL==\"shournalk_ctrl\", RUN=\"/bin/sh -c 'test -f /etc/shournal.d/kgroup && read -r ___kgrp < /etc/shournal.d/kgroup || ___kgrp=${GROUPNAME_SHOURNALK}; chgrp $$___kgrp /sys%p/mark'\"\n"
  },
  {
    "path": "install/CMakeLists.txt",
    "content": "\n# The files here are only required for installation and\n# have no direct relation to source-code.\n\nconfigure_file( postinst.in \"${CMAKE_BINARY_DIR}/debian/postinst\" )\nconfigure_file( prerm.in \"${CMAKE_BINARY_DIR}/debian/prerm\" )\n\n# For the kernelmodule:\nconfigure_file( \"90-shournaladd.rules.in\" \"90-shournaladd.rules\")\nconfigure_file( shournalk-load.conf shournalk.conf )\nconfigure_file( postinst-dkms.in postinst-dkms )\nconfigure_file( prerm-dkms.in prerm-dkms )\n\nif(${SHOURNAL_EDITION} MATCHES \"full|ko\")\n\ninstall(FILES\n    \"${CMAKE_CURRENT_BINARY_DIR}/90-shournaladd.rules\"\n    DESTINATION \"/lib/udev/rules.d\"\n    )\n\ninstall(FILES\n    \"${CMAKE_CURRENT_BINARY_DIR}/shournalk.conf\"\n    DESTINATION \"/usr/lib/modules-load.d\"\n    )\n\nendif() # ${SHOURNAL_EDITION} MATCHES \"full\"\n"
  },
  {
    "path": "install/postinst-dkms.in",
    "content": "# No shebang, we are appended as needed!\n\n# Copyright (C) 2002-2005 Flavio Stanchina\n# Copyright (C) 2005-2006 Aric Cyr\n# Copyright (C) 2007 Mario Limonciello\n# Copyright (C) 2009 Alberto Milone\n\n# Copyright (C) 2021 Tycho Kirchner: modified to fit shournal's needs\n\n\nDKMS_NAME=shournalk # name of the kernel module\nDKMS_PACKAGE_NAME=shournal # we bundle the dkms package inside shournal for easy installation.\nDKMS_VERSION=\"${shournal_version}\"\n\npostinst_found=0\n\ncase \"$1\" in\n\tconfigure)\n\t\tfor DKMS_POSTINST in /usr/lib/dkms/common.postinst /usr/share/$DKMS_PACKAGE_NAME/postinst; do\n\t\t\tif [ -f $DKMS_POSTINST ]; then\n\t\t\t\t$DKMS_POSTINST $DKMS_NAME $DKMS_VERSION /usr/share/$DKMS_PACKAGE_NAME \"\" $2\n\t\t\t\tpostinst_found=1\n\t\t\t\tbreak\n\t\t\tfi\n\t\tdone\n\t\tif [ \"$postinst_found\" -eq 1 ]; then\n\t\t\t# Don't modprobe -r shournalk - we don't want to disturb running processes.\n\t\t\tmodprobe shournalk || :\n\t\telse\n\t\t\techo \"ERROR: DKMS version is too old and $DKMS_PACKAGE_NAME was not\"\n\t\t\techo \"built with legacy DKMS support.\"\n\t\t\techo \"You must either rebuild $DKMS_PACKAGE_NAME with legacy postinst\"\n\t\t\techo \"support or upgrade DKMS to a more current version.\"\n\t\t\texit 1\n\t\tfi\n\t;;\nesac\n\nexit 0\n\n"
  },
  {
    "path": "install/postinst.in",
    "content": "#!/bin/sh\nset -e\ngetent group ${MSENTER_GROUPNAME} || groupadd ${MSENTER_GROUPNAME}\ngetent group ${GROUPNAME_SHOURNALK} || groupadd ${GROUPNAME_SHOURNALK}\n\n# do not call exit, this file might be appended to..\n"
  },
  {
    "path": "install/prerm-dkms.in",
    "content": "# No shebang, we are appended as needed!\n\nset -e\n\nDKMS_NAME=shournalk\nDKMS_VERSION=\"${shournal_version}\"\n\ncase \"$1\" in\n    remove|upgrade|deconfigure)\n      if [  \"$(dkms status -m $DKMS_NAME -v $DKMS_VERSION)\" ]; then\n         dkms remove -m $DKMS_NAME -v $DKMS_VERSION --all\n      fi\n    ;;\nesac\n\n\n\n\nexit 0\n"
  },
  {
    "path": "install/prerm.in",
    "content": "#!/bin/sh\nset -e\n\n# This file is intentionally left blank for consistency with\n# postinst-dkms\n\n# do not call exit, this file might be appended to..\n"
  },
  {
    "path": "install/shournalk-load.conf",
    "content": "shournalk\n"
  },
  {
    "path": "kernel/CMakeLists.txt",
    "content": "\n# Find kernel headers\nlist(APPEND CMAKE_MODULE_PATH \"${CMAKE_CURRENT_SOURCE_DIR}/cmake\")\nfind_package(KernelHeaders REQUIRED)\n\nset(common_prefix \"../src/common\")\n\ninclude_directories(\n    ${KERNELHEADERS_INCLUDE_DIRS}\n    \"${common_prefix}\"\n    )\n\n# Add a dummy-library to satisfy IDE-intellisense -\n# plain 'make' does the real work (see below) and file Kbuild.\n# Also used for copying files to build and dkms-dir, so add all source-files here.\nfile(GLOB kernel_src\n    \"*.h\"\n    \"*.c\"\n    \"${common_prefix}/xxhash_common.h\"\n    \"${common_prefix}/xxhash_common.c\"\n    \"${common_prefix}/user_kernerl.h\"\n)\nadd_library(lib_shournalk_dummy\n    EXCLUDE_FROM_ALL\n    ${kernel_src}\n)\n\nset_target_properties(lib_shournalk_dummy PROPERTIES LANGUAGE C)\n\n# avoid MODULE_LICENSE-warnings.\ntarget_compile_definitions(lib_shournalk_dummy PRIVATE\n  -D__KERNEL__\n  -DMODULE\n  -DCONFIG_MEMCG\n)\n\nif(CMAKE_BUILD_TYPE MATCHES Debug)\n    set(k_extra_cflags \"-DDEBUG\")\nelseif(CMAKE_BUILD_TYPE MATCHES RelWithDebInfo)\n    # profile\n    set(k_extra_cflags \"-O3 -DPROFILE\")\nelse()\n    # release\n    set(k_extra_cflags \"-O3\")\nendif()\n\n# The kernel module is compiled in-tree, after we\n# copied the source-files there.\n# This has the advantage that the Kbuild file\n# can be generated and that we compile\n# the source code in the same way as\n# a later user from the installed /usr/src/shournalk*.\nconfigure_file(Kbuild \"${CMAKE_CURRENT_BINARY_DIR}/\" @ONLY)\nadd_custom_target(shournalk ALL\n  COMMAND $(MAKE) --file=Kbuild EXTRA_CFLAGS=${k_extra_cflags} shournal_cmake_build=true\n    WORKING_DIRECTORY \"${CMAKE_CURRENT_BINARY_DIR}\"\n    VERBATIM\n    DEPENDS shournalk_dep_file\n)\n# Better be safe and copy the files on cmake and make.\nfile(COPY ${kernel_src} DESTINATION \"${CMAKE_CURRENT_BINARY_DIR}\" )\nadd_custom_command(OUTPUT shournalk_dep_file\n        PRE_BUILD\n        COMMAND ${CMAKE_COMMAND} -E copy_if_different\n  
              ${kernel_src}\n                \"${CMAKE_CURRENT_BINARY_DIR}\")\n\n# Also copy src-files for dkms\nset(dkms_dir \"${CMAKE_BINARY_DIR}/dkms\")\nadd_custom_command(TARGET shournalk POST_BUILD COMMAND ${CMAKE_COMMAND}\n                   -E copy_if_different ${kernel_src} \"${dkms_dir}\")\nconfigure_file(Kbuild \"${dkms_dir}/Makefile\" @ONLY)\nconfigure_file(\"dkms.conf.in\" \"${dkms_dir}/dkms.conf\" @ONLY)\n\ninstall( DIRECTORY \"${dkms_dir}/\" DESTINATION \"${shournal_install_dir_shournalk_src}\" )\n\n\n\n\n\n"
  },
  {
    "path": "kernel/Kbuild",
    "content": "\n$(info building kernel module shournalk version @shournal_version@)\n\nobj-m := shournalk.o\n\n# shournal_version filled by cmake (see Makefile in generated dkms dir)\nCFLAGS_MODULE += \"-DSHOURNAL_VERSION=\\\"@shournal_version@\\\"\"\n\n\nshournalk-y += shournalk_main.o event_handler.o shournalk_sysfs.o \\\n               tracepoint_helper.o event_target.o kutil.o event_queue.o \\\n               event_consumer.o shournal_kio.o xxhash_shournalk.o \\\n               kpathtree.o shournalk_test.o shournalk_global.o \\\n               hash_table_str.o kfileextensions.o \\\n               event_consumer_cache.o \\\n               xxhash_common.o \\\n\nPWD         := $(shell pwd)\nKVER        ?= $(shell uname -r)\nKBASE       ?= /lib/modules/$(KVER)\nKBUILD_DIR  ?= $(KBASE)/build\n\nifeq ($(shournal_cmake_build), true)\n$(info building kernel module from cmake)\nall:\n\t@$(MAKE) -C $(KBUILD_DIR) M=$(PWD) modules\n\nelse\n# usually called by dkms but can also be used standalone\n\nno_strip    ?= false\n\ncommon_make_args = @$(MAKE) -C $(KBUILD_DIR) M=$(PWD) EXTRA_CFLAGS=\"-O3\"\n\n# strip module by default\nifndef INSTALL_MOD_STRIP\nifneq ($(no_strip), true)\n    $(info stripping is ON by default, use no_strip=true or define \\\n      INSTALL_MOD_STRIP if this is not desired.)\n    common_make_args += INSTALL_MOD_STRIP=1\nendif\nendif\n\n\nall:\n\t$(common_make_args) modules\n\ninstall:\n\t$(common_make_args) modules_install\n\tdepmod -A\nendif\n\nclean:\n\t@rm -rf *~ *.o *.mod *.mod.c .*.cmd .tmp_versions\n\n"
  },
  {
    "path": "kernel/LICENSE",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 2, June 1991\n\n Copyright (C) 1989, 1991 Free Software Foundation, Inc.,\n 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The licenses for most software are designed to take away your\nfreedom to share and change it.  By contrast, the GNU General Public\nLicense is intended to guarantee your freedom to share and change free\nsoftware--to make sure the software is free for all its users.  This\nGeneral Public License applies to most of the Free Software\nFoundation's software and to any other program whose authors commit to\nusing it.  (Some other Free Software Foundation software is covered by\nthe GNU Lesser General Public License instead.)  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthis service if you wish), that you receive source code or can get it\nif you want it, that you can change the software or use pieces of it\nin new free programs; and that you know you can do these things.\n\n  To protect your rights, we need to make restrictions that forbid\nanyone to deny you these rights or to ask you to surrender the rights.\nThese restrictions translate to certain responsibilities for you if you\ndistribute copies of the software, or if you modify it.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must give the recipients all the rights that\nyou have.  You must make sure that they, too, receive or can get the\nsource code.  
And you must show them these terms so they know their\nrights.\n\n  We protect your rights with two steps: (1) copyright the software, and\n(2) offer you this license which gives you legal permission to copy,\ndistribute and/or modify the software.\n\n  Also, for each author's protection and ours, we want to make certain\nthat everyone understands that there is no warranty for this free\nsoftware.  If the software is modified by someone else and passed on, we\nwant its recipients to know that what they have is not the original, so\nthat any problems introduced by others will not reflect on the original\nauthors' reputations.\n\n  Finally, any free program is threatened constantly by software\npatents.  We wish to avoid the danger that redistributors of a free\nprogram will individually obtain patent licenses, in effect making the\nprogram proprietary.  To prevent this, we have made it clear that any\npatent must be licensed for everyone's free use or not licensed at all.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                    GNU GENERAL PUBLIC LICENSE\n   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n  0. This License applies to any program or other work which contains\na notice placed by the copyright holder saying it may be distributed\nunder the terms of this General Public License.  The \"Program\", below,\nrefers to any such program or work, and a \"work based on the Program\"\nmeans either the Program or any derivative work under copyright law:\nthat is to say, a work containing the Program or a portion of it,\neither verbatim or with modifications and/or translated into another\nlanguage.  (Hereinafter, translation is included without limitation in\nthe term \"modification\".)  Each licensee is addressed as \"you\".\n\nActivities other than copying, distribution and modification are not\ncovered by this License; they are outside its scope.  
The act of\nrunning the Program is not restricted, and the output from the Program\nis covered only if its contents constitute a work based on the\nProgram (independent of having been made by running the Program).\nWhether that is true depends on what the Program does.\n\n  1. You may copy and distribute verbatim copies of the Program's\nsource code as you receive it, in any medium, provided that you\nconspicuously and appropriately publish on each copy an appropriate\ncopyright notice and disclaimer of warranty; keep intact all the\nnotices that refer to this License and to the absence of any warranty;\nand give any other recipients of the Program a copy of this License\nalong with the Program.\n\nYou may charge a fee for the physical act of transferring a copy, and\nyou may at your option offer warranty protection in exchange for a fee.\n\n  2. You may modify your copy or copies of the Program or any portion\nof it, thus forming a work based on the Program, and copy and\ndistribute such modifications or work under the terms of Section 1\nabove, provided that you also meet all of these conditions:\n\n    a) You must cause the modified files to carry prominent notices\n    stating that you changed the files and the date of any change.\n\n    b) You must cause any work that you distribute or publish, that in\n    whole or in part contains or is derived from the Program or any\n    part thereof, to be licensed as a whole at no charge to all third\n    parties under the terms of this License.\n\n    c) If the modified program normally reads commands interactively\n    when run, you must cause it, when started running for such\n    interactive use in the most ordinary way, to print or display an\n    announcement including an appropriate copyright notice and a\n    notice that there is no warranty (or else, saying that you provide\n    a warranty) and that users may redistribute the program under\n    these conditions, and telling the user how to view a copy of this\n  
  License.  (Exception: if the Program itself is interactive but\n    does not normally print such an announcement, your work based on\n    the Program is not required to print an announcement.)\n\nThese requirements apply to the modified work as a whole.  If\nidentifiable sections of that work are not derived from the Program,\nand can be reasonably considered independent and separate works in\nthemselves, then this License, and its terms, do not apply to those\nsections when you distribute them as separate works.  But when you\ndistribute the same sections as part of a whole which is a work based\non the Program, the distribution of the whole must be on the terms of\nthis License, whose permissions for other licensees extend to the\nentire whole, and thus to each and every part regardless of who wrote it.\n\nThus, it is not the intent of this section to claim rights or contest\nyour rights to work written entirely by you; rather, the intent is to\nexercise the right to control the distribution of derivative or\ncollective works based on the Program.\n\nIn addition, mere aggregation of another work not based on the Program\nwith the Program (or with a work based on the Program) on a volume of\na storage or distribution medium does not bring the other work under\nthe scope of this License.\n\n  3. 
You may copy and distribute the Program (or a work based on it,\nunder Section 2) in object code or executable form under the terms of\nSections 1 and 2 above provided that you also do one of the following:\n\n    a) Accompany it with the complete corresponding machine-readable\n    source code, which must be distributed under the terms of Sections\n    1 and 2 above on a medium customarily used for software interchange; or,\n\n    b) Accompany it with a written offer, valid for at least three\n    years, to give any third party, for a charge no more than your\n    cost of physically performing source distribution, a complete\n    machine-readable copy of the corresponding source code, to be\n    distributed under the terms of Sections 1 and 2 above on a medium\n    customarily used for software interchange; or,\n\n    c) Accompany it with the information you received as to the offer\n    to distribute corresponding source code.  (This alternative is\n    allowed only for noncommercial distribution and only if you\n    received the program in object code or executable form with such\n    an offer, in accord with Subsection b above.)\n\nThe source code for a work means the preferred form of the work for\nmaking modifications to it.  For an executable work, complete source\ncode means all the source code for all modules it contains, plus any\nassociated interface definition files, plus the scripts used to\ncontrol compilation and installation of the executable.  
However, as a\nspecial exception, the source code distributed need not include\nanything that is normally distributed (in either source or binary\nform) with the major components (compiler, kernel, and so on) of the\noperating system on which the executable runs, unless that component\nitself accompanies the executable.\n\nIf distribution of executable or object code is made by offering\naccess to copy from a designated place, then offering equivalent\naccess to copy the source code from the same place counts as\ndistribution of the source code, even though third parties are not\ncompelled to copy the source along with the object code.\n\n  4. You may not copy, modify, sublicense, or distribute the Program\nexcept as expressly provided under this License.  Any attempt\notherwise to copy, modify, sublicense or distribute the Program is\nvoid, and will automatically terminate your rights under this License.\nHowever, parties who have received copies, or rights, from you under\nthis License will not have their licenses terminated so long as such\nparties remain in full compliance.\n\n  5. You are not required to accept this License, since you have not\nsigned it.  However, nothing else grants you permission to modify or\ndistribute the Program or its derivative works.  These actions are\nprohibited by law if you do not accept this License.  Therefore, by\nmodifying or distributing the Program (or any work based on the\nProgram), you indicate your acceptance of this License to do so, and\nall its terms and conditions for copying, distributing or modifying\nthe Program or works based on it.\n\n  6. Each time you redistribute the Program (or any work based on the\nProgram), the recipient automatically receives a license from the\noriginal licensor to copy, distribute or modify the Program subject to\nthese terms and conditions.  
You may not impose any further\nrestrictions on the recipients' exercise of the rights granted herein.\nYou are not responsible for enforcing compliance by third parties to\nthis License.\n\n  7. If, as a consequence of a court judgment or allegation of patent\ninfringement or for any other reason (not limited to patent issues),\nconditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot\ndistribute so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you\nmay not distribute the Program at all.  For example, if a patent\nlicense would not permit royalty-free redistribution of the Program by\nall those who receive copies directly or indirectly through you, then\nthe only way you could satisfy both it and this License would be to\nrefrain entirely from distribution of the Program.\n\nIf any portion of this section is held invalid or unenforceable under\nany particular circumstance, the balance of the section is intended to\napply and the section as a whole is intended to apply in other\ncircumstances.\n\nIt is not the purpose of this section to induce you to infringe any\npatents or other property right claims or to contest validity of any\nsuch claims; this section has the sole purpose of protecting the\nintegrity of the free software distribution system, which is\nimplemented by public license practices.  Many people have made\ngenerous contributions to the wide range of software distributed\nthrough that system in reliance on consistent application of that\nsystem; it is up to the author/donor to decide if he or she is willing\nto distribute software through any other system and a licensee cannot\nimpose that choice.\n\nThis section is intended to make thoroughly clear what is believed to\nbe a consequence of the rest of this License.\n\n  8. 
If the distribution and/or use of the Program is restricted in\ncertain countries either by patents or by copyrighted interfaces, the\noriginal copyright holder who places the Program under this License\nmay add an explicit geographical distribution limitation excluding\nthose countries, so that distribution is permitted only in or among\ncountries not thus excluded.  In such case, this License incorporates\nthe limitation as if written in the body of this License.\n\n  9. The Free Software Foundation may publish revised and/or new versions\nof the General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\nEach version is given a distinguishing version number.  If the Program\nspecifies a version number of this License which applies to it and \"any\nlater version\", you have the option of following the terms and conditions\neither of that version or of any later version published by the Free\nSoftware Foundation.  If the Program does not specify a version number of\nthis License, you may choose any version ever published by the Free Software\nFoundation.\n\n  10. If you wish to incorporate parts of the Program into other free\nprograms whose distribution conditions are different, write to the author\nto ask for permission.  For software which is copyrighted by the Free\nSoftware Foundation, write to the Free Software Foundation; we sometimes\nmake exceptions for this.  Our decision will be guided by the two goals\nof preserving the free status of all derivatives of our free software and\nof promoting the sharing and reuse of software generally.\n\n                            NO WARRANTY\n\n  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY\nFOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  
EXCEPT WHEN\nOTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES\nPROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED\nOR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS\nTO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE\nPROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,\nREPAIR OR CORRECTION.\n\n  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR\nREDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,\nINCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING\nOUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED\nTO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY\nYOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER\nPROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGES.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nconvey the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software; you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation; either version 2 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License along\n    with this program; if not, write to the Free Software Foundation, Inc.,\n    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nAlso add information on how to contact you by electronic and paper mail.\n\nIf the program is interactive, make it output a short notice like this\nwhen it starts in an interactive mode:\n\n    Gnomovision version 69, Copyright (C) year name of author\n    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  Of course, the commands you use may\nbe called something other than `show w' and `show c'; they could even be\nmouse-clicks or menu items--whatever suits your program.\n\nYou should also get your employer (if you work as a programmer) or your\nschool, if any, to sign a \"copyright disclaimer\" for the program, if\nnecessary.  
Here is a sample; alter the names:\n\n  Yoyodyne, Inc., hereby disclaims all copyright interest in the program\n  `Gnomovision' (which makes passes at compilers) written by James Hacker.\n\n  <signature of Ty Coon>, 1 April 1989\n  Ty Coon, President of Vice\n\nThis General Public License does not permit incorporating your program into\nproprietary programs.  If your program is a subroutine library, you may\nconsider it more useful to permit linking proprietary applications with the\nlibrary.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.\n"
  },
  {
    "path": "kernel/cmake/FindKernelHeaders.cmake",
    "content": "\n# get kernel release\nexecute_process(\n        COMMAND uname -r\n        OUTPUT_VARIABLE KERNEL_RELEASE\n        OUTPUT_STRIP_TRAILING_WHITESPACE\n)\nstring(REGEX REPLACE \"-[^-]+$\" \"\" KERNEL_RELEASE_NO_ARCH ${KERNEL_RELEASE})\nstring(REGEX REPLACE \"^([0-9]+\\.[0-9]+).*$\" \"\\\\1\" KERNEL_RELEASE_HWE ${KERNEL_RELEASE})\n\n# Find the headers\nforeach(header_path\n        /usr/src/linux-headers-${KERNEL_RELEASE_NO_ARCH}-common # Debian\n        /usr/src/linux-${KERNEL_RELEASE_NO_ARCH}/include        # Opensuse\n        /usr/src/linux-headers-${KERNEL_RELEASE_NO_ARCH}        # Ubuntu\n\t/usr/src/linux-hwe-${KERNEL_RELEASE_HWE}-headers-${KERNEL_RELEASE_NO_ARCH}        # Ubuntu HWE\n        )\n    if(EXISTS \"${header_path}\")\n        set(KERNELHEADERS_DIR \"${header_path}\")\n        break()\n    endif()\nendforeach()\n\nif(NOT (KERNELHEADERS_DIR))\n    # Red Hat (?)\n    find_path(KERNELHEADERS_DIR\n        include/linux/user.h\n        PATHS /usr/src/kernels/${KERNEL_RELEASE})\nendif()\n\n\nmessage(STATUS \"Kernel release: ${KERNEL_RELEASE}\")\n\nif (KERNELHEADERS_DIR)\nset(KERNELHEADERS_INCLUDE_DIRS\n        ${KERNELHEADERS_DIR}/include\n        ${KERNELHEADERS_DIR}/arch/x86/include)\n    message(STATUS \"Kernel headers: ${KERNELHEADERS_INCLUDE_DIRS}\")\nelse()\n    message(WARNING \"Unable to find kernel headers!\")\nendif()\n"
  },
  {
    "path": "kernel/dkms.conf.in",
    "content": "PACKAGE_NAME=\"shournalk\"\nPACKAGE_VERSION=\"@shournal_version@\"\nCLEAN=\"make clean\"\nMAKE[0]=\"make all KVER=$kernelver\"\nBUILT_MODULE_NAME[0]=\"shournalk\"\nDEST_MODULE_LOCATION[0]=\"/updates/dkms\"\nAUTOINSTALL=\"yes\"\n"
  },
  {
    "path": "kernel/event_consumer.c",
    "content": "\n#include <linux/compiler.h>\n#include <linux/dcache.h>\n#include <linux/file.h>\n#include <linux/fdtable.h>\n#include <linux/kthread.h>\n#include <linux/slab.h>\n#include <linux/stat.h>\n#include <linux/sched.h>\n#include <linux/fadvise.h>\n#include <linux/kthread.h>\n#include <linux/mmu_context.h>\n#include <linux/splice.h>\n#include <asm/uaccess.h>\n\n\n#include \"event_consumer.h\"\n#include \"event_consumer_cache.h\"\n#include \"event_queue.h\"\n#include \"event_target.h\"\n#include \"kutil.h\"\n#include \"shournal_kio.h\"\n#include \"shournalk_user.h\"\n#include \"kpathtree.h\"\n\n#include \"xxhash_common.h\"\n\n#define CONSUMER_CIRC_BUFSIZE (1 << 15)\n\n\nstatic inline bool __path_is_hidden(const char* pathname, int path_len){\n    return strnstr(pathname, \"/.\", path_len) != NULL;\n}\n\nstatic inline struct file *\n__reopen_file_silent(const struct path *path, const struct cred * cred){\n    return dentry_open(path,\n                       O_RDONLY | O_NOATIME | FMODE_NONOTIFY,\n                       cred);\n}\n\n\n\n#ifdef DEBUG\nstatic void __dbg_print_event(struct event_target* t __attribute__ ((unused)),\n                              struct close_event* ev,\n                     const char* msg){\n\n        const char* pathname;\n        char* buf = (char*)(__get_free_page(GFP_KERNEL));\n        if(! 
buf){\n            pr_devel(\"__get_free_page failed!\\n\");\n            return;\n        }\n        pathname = d_path(&ev->path, buf, PATH_MAX);\n        if(IS_ERR(pathname)){\n            pr_devel(\"%s failed to resolve pathname..\\n\", msg);\n        } else {\n            pr_devel(\"%s %s\\n\", msg, pathname);\n        }\n        free_page((ulong)buf);\n}\n#else\nstatic void __dbg_print_event(struct event_target* t __attribute__ ((unused)),\n                              struct close_event* ev __attribute__ ((unused)),\n                              const char* msg __attribute__ ((unused))){}\n#endif\n\n\n\nstatic bool __write_to_target_file_safe(struct event_target* event_target,\n                                      const void *buf, size_t count){\n    const char* msg;\n    ssize_t ret = shournal_kio_write(event_target->file, buf, count);\n    if(likely(ret == (ssize_t)count)){\n        return true;\n    }\n\n    WRITE_ONCE(event_target->ERROR, true);\n    if(ret >= 0) {\n        ret = EIO;\n        msg = \"not all bytes written\";\n    } else {\n        ret = -ret;\n        msg = \"errno\";\n    }\n    pr_debug(\"Failed to write to event target with parent pid %d - %s: %ld\\n\",\n             event_target->caller_tsk->pid, msg, ret);\n    event_target_write_result_to_user_ONCE(event_target, (int)ret);\n    return false;\n}\n\n/// xxhash the passed file\nstatic void __do_hash_file(struct partial_xxhash* part_hash,\n                               struct file* file,\n                               loff_t file_size,\n                               const struct qstr* filename,\n                               struct shournalk_close_event* user_event){\n    struct partial_xxhash_result hash_result;\n    long ret;\n    kutil_WARN_DBG(file_size == 0, \"file_size == 0\");\n    kutil_WARN_DBG(part_hash->chunksize == 0, \"part_hash->chunksize == 0\");\n\n    part_hash->seekstep =\n            file_size / part_hash->max_count_of_reads;\n    if(unlikely(ret = 
partial_xxh_digest_file(file,\n                                      part_hash,\n                                      &hash_result))){\n        pr_devel(\"failed to partial_hash file with %ld - %s\\n\", ret, filename->name);\n        goto invalidate_hash;\n    }\n\n    if(unlikely(hash_result.count_of_bytes == 0)){\n        // zero bytes read - file became empty in between?\n        pr_devel(\"zero bytes read for previously non-empty file\");\n        goto invalidate_hash;\n    }\n    user_event->hash_is_null = false;\n    user_event->hash = hash_result.hash;\n\n    return;\n\n\ninvalidate_hash:\n    user_event->hash = 0;\n    user_event->hash_is_null = true;\n}\n\n\n/// rewrite user_event after having corrected the file content\n/// size with the actual number of bytes written (should\n/// happen rarely)\nstatic bool\n__correct_file_event_at_pos(struct event_target* t, loff_t pos,\n                            struct shournalk_close_event* user_event){\n    ssize_t ret;\n    struct file* dest = t->file->__file;\n    if( (ret = kutil_kernel_write(dest, user_event,\n                       sizeof(struct shournalk_close_event),\n                       &pos\n                       )) !=  sizeof(struct shournalk_close_event)){\n        WRITE_ONCE(t->ERROR, true);\n        pr_debug(\"Failed to correct file content size for target %s, returned %ld\\n\",\n                 t->file_init_path, ret);\n        event_target_write_result_to_user_ONCE(t, (int)-ret);\n        return false;\n    }\n    return true;\n}\n\n\n/// write size bytes from src to\n/// our target file\nstatic bool\n__write_file_content(struct event_target* t,\n                     struct file* src,\n                     loff_t size,\n                     struct shournalk_close_event* user_event){\n    ssize_t ret;\n    struct file* dest = t->file->__file;\n    loff_t src_pos = 0;\n    loff_t old_dst_pos;\n    loff_t written_size;\n\n    if(unlikely(! 
event_consumer_flush_target_file_safe(t))){\n        return false;\n    }\n\n    file_start_write(dest);\n    old_dst_pos = dest->f_pos;\n    ret = do_splice_direct(src, &src_pos, dest, &dest->f_pos, size, 0);\n    written_size = dest->f_pos - old_dst_pos;\n    file_end_write(dest);\n\n    if(unlikely(written_size != size)){\n        // before having written the file content, the\n        // close event was written, which we overwrite now.\n        // seek back and correct the written bytes\n        loff_t correct_pos = old_dst_pos - sizeof(struct shournalk_close_event);\n        user_event->bytes = written_size;\n        pr_debug(\"Only %lld of %lld bytes written - attempting \"\n                 \"to correct this...\\n\", written_size, size);\n        return __correct_file_event_at_pos(t, correct_pos, user_event);\n\n    }\n    return true;\n}\n\n\nstatic bool __do_log_file_event(struct event_target* t,\n                                struct close_event* close_ev,\n                                int event_flags,\n                                struct qstr* filename,\n                                bool store_whole_file,\n                                struct consumer_cache_entry* directory,\n                                struct path* last_directory){\n    struct shournalk_close_event user_event;\n    struct file* file = NULL;\n    const struct inode* inode = close_ev->path.dentry->d_inode;\n\n    if(current->mm && unlikely(current->mm->owner != t->caller_tsk)){\n        WRITE_ONCE(t->ERROR, true);\n        // See comment in event_consumer_thread_setup\n        // for the rationale.\n        pr_debug(\"mm->owner does not belong to pid %d \"\n                 \"any more - most likely the caller died. 
\"\n                 \"Event-logging was stopped.\\n\", t->caller_tsk->pid);\n        event_target_write_result_to_user_ONCE(t, EREMCHG);\n\n        return false;\n    }\n\n\n    user_event.flags = event_flags;\n    user_event.mtime = kutil_get_mtime_sec(inode);\n    user_event.size = inode->i_size;\n    user_event.mode = inode->i_mode;\n    user_event.hash_is_null = t->partial_hash.chunksize == 0 ||\n                              unlikely(user_event.size == 0);\n\n    if(! user_event.hash_is_null || store_whole_file){\n        file = __reopen_file_silent(&close_ev->path, t->cred);\n        if( unlikely(IS_ERR_OR_NULL(file))) {\n            pr_devel(\"failed to reopen file %s\\n\", filename->name);\n            user_event.hash_is_null = true;\n            store_whole_file = false;\n        } else {\n            long ret;\n            // maybe_todo: only set to random, if ! store_whole_file?\n            ret = vfs_fadvise(file, 0,0, POSIX_FADV_RANDOM);\n            if(ret){\n                pr_devel(\"vfs_fadvise failed with %ld\\n\", ret);\n            }\n\n        }\n    }\n    user_event.bytes = (store_whole_file) ? user_event.size : 0;\n    if(! user_event.hash_is_null){\n        __do_hash_file(&t->partial_hash, file, user_event.size, filename, &user_event);\n    }\n\n    if(unlikely(! __write_to_target_file_safe(\n                    t, &user_event,\n                    sizeof (struct shournalk_close_event)))\n            ){\n        return false;\n    }\n\n    if(unlikely(store_whole_file)){\n        if(__write_file_content(t, file, user_event.bytes, &user_event)){\n            t->stored_files_count++;\n        }\n    }\n    if(! IS_ERR_OR_NULL(file)){\n        fput(file);\n    }\n\n    // Only write directory path, if not written before\n    if(! 
path_equal(last_directory, &directory->dir)){\n        __write_to_target_file_safe(t, directory->dirname, directory->dirname_len);\n        __write_to_target_file_safe(t, \"/\", 1);\n        *last_directory = directory->dir;\n    }\n    __write_to_target_file_safe(t, filename->name, filename->len + 1);\n\n    return true;\n}\n\n\nstatic void\n__handle_read_event(struct event_target* t,\n                    struct close_event* close_ev,\n                    bool may_write){\n    const struct shounalk_settings* sets = &t->settings;\n    bool general_discard;\n    bool store_discard;\n    struct kutil_name_snapshot name_snapshot;\n    struct qstr* filename;\n    const struct inode* inode = close_ev->path.dentry->d_inode;\n    int path_is_hidden = -1;\n    struct path* path = &close_ev->path;\n    struct consumer_cache_entry* d_ent;\n    bool cache_entry_existed;\n\n    t->r_examined_count++;\n\n    kutil_WARN_DBG(! t->r_enable, \"! t->r_enable\");\n\n    if(inode->i_nlink == 0){\n        t->r_deleted_count++;\n        pr_devel(\"ignore deleted file\\n\");\n        return;\n    }\n    // Where possible, we check conditions in ascending order\n    // of the expected computational overhead\n    general_discard = t->r_includes.n_paths == 0 ||\n                      (sets->r_only_writable && ! may_write);\n\n    store_discard =  t->script_includes.n_paths == 0 ||\n                     (sets->r_store_only_writable && ! 
may_write  ) ||\n                     inode->i_size > sets->r_store_max_size ||\n                     t->stored_files_count >= sets->r_store_max_count_of_files;\n    if(general_discard && store_discard){\n        // maybe_todo: put this early-discard code directly into the\n        // __fput-handler, to avoid the ringbuffer altogether.\n        // __dbg_print_event(t, close_ev, \"early discard\");\n        return;\n    }\n\n    d_ent = consumer_cache_find(\n                    t->event_consumer.r_cache,\n                    path->mnt, READ_ONCE(path->dentry->d_parent),\n                &cache_entry_existed);\n    if(IS_ERR(d_ent)){\n        return;\n    }\n    if(cache_entry_existed){\n        t->_dircache_hits++;\n        general_discard |= d_ent->flags & DIRCACHE_R_OFF;\n        store_discard |= d_ent->flags & DIRCACHE_SCRIPT_OFF;\n    } else {\n        general_discard |=\n            !kpathtree_is_subpath(&t->r_includes,d_ent->dirname,d_ent->dirname_len,true) ||\n             kpathtree_is_subpath(&t->r_excludes,d_ent->dirname,d_ent->dirname_len,true) ||\n                  (sets->r_exclude_hidden &&\n                  (path_is_hidden = __path_is_hidden(d_ent->dirname,d_ent->dirname_len)));\n        store_discard |=\n            !kpathtree_is_subpath(&t->script_includes,d_ent->dirname,d_ent->dirname_len,true) ||\n             kpathtree_is_subpath(&t->script_excludes,d_ent->dirname,d_ent->dirname_len,true);\n\n        if(! store_discard && sets->r_store_exclude_hidden){\n            // use hidden result from above, if possible\n            store_discard = (path_is_hidden != -1)\n                    ? 
path_is_hidden\n                    : __path_is_hidden(d_ent->dirname,d_ent->dirname_len);\n        }\n        d_ent->flags = 0;\n        if(general_discard){\n            d_ent->flags |= DIRCACHE_R_OFF;\n        }\n        if(store_discard){\n            d_ent->flags |= DIRCACHE_SCRIPT_OFF;\n        }\n    }\n    if(general_discard && store_discard){\n        return;\n    }\n    // file only settings\n\n    kutil_take_name_snapshot(&name_snapshot, path->dentry);\n    filename = &name_snapshot.name;\n\n    general_discard |= (sets->r_exclude_hidden && filename->name[0] == '.');\n\n    store_discard |= (sets->r_store_exclude_hidden && filename->name[0] == '.') ||\n           (t->script_ext.n_ext &&\n            ! file_extensions_contain(&t->script_ext, (const char*)filename->name,\n                                      filename->len));\n\n    if(general_discard && store_discard){\n        // pr_devel(\"discarding %s\\n\", filename);\n        goto out_release;\n    }\n\n    // Capture store-events regardless of r_max_event_count\n    if(store_discard && t->r_event_count >= sets->r_max_event_count){\n        t->r_dropped_count++;\n        goto out_release;\n    }\n\n    // user really wants this event\n    if(likely(__do_log_file_event(t, close_ev, O_RDONLY, filename, !store_discard,\n                        d_ent, &t->event_consumer.r_last_written_path ))){\n        t->r_event_count++;\n    }\n\nout_release:\n    kutil_release_name_snapshot(&name_snapshot);\n}\n\nstatic void\n__handle_write_event(struct event_target* t,\n                    struct close_event* close_ev,\n                    bool may_write){\n    const struct shounalk_settings* sets = &t->settings;\n    struct kutil_name_snapshot name_snapshot;\n    const struct inode* inode = close_ev->path.dentry->d_inode;\n    struct path* path = &close_ev->path;\n    struct consumer_cache_entry* d_ent;\n    bool cache_entry_existed;\n\n    t->w_examined_count++;\n\n    // __dbg_print_event(t, close_ev, 
\"processing wevent\");\n\n    if(inode->i_nlink == 0){\n        t->w_deleted_count++;\n        pr_devel(\"ignore deleted file\\n\");\n        return;\n    }\n    if(unlikely(! may_write)){\n        pr_devel(\"ignore not writable file\\n\");\n        return;\n    }\n\n    d_ent = consumer_cache_find(\n                    t->event_consumer.w_cache,\n                    path->mnt, READ_ONCE(path->dentry->d_parent),\n                &cache_entry_existed);\n    if(IS_ERR(d_ent)){\n        return;\n    }\n    // Check if we have seen and accepted our d_parent-dir before.\n    if(cache_entry_existed){\n        t->_dircache_hits++;\n        if(d_ent->flags & DIRCACHE_W_OFF){\n            return;\n        }\n    } else {\n        if(!kpathtree_is_subpath(&t->w_includes,d_ent->dirname,d_ent->dirname_len,true) ||\n            kpathtree_is_subpath(&t->w_excludes,d_ent->dirname,d_ent->dirname_len,true) ||\n            (sets->w_exclude_hidden && __path_is_hidden(d_ent->dirname,d_ent->dirname_len)))\n        {\n            d_ent->flags = DIRCACHE_W_OFF;\n            return;\n        }\n        d_ent->flags = 0;\n    }\n    // directory was accepted - check file:\n    kutil_take_name_snapshot(&name_snapshot, path->dentry);\n\n    if(sets->w_exclude_hidden && name_snapshot.name.name[0] == '.'){\n        goto out_release;\n    }\n    if(t->w_event_count >= sets->w_max_event_count){\n        t->w_dropped_count++;\n        goto out_release;\n    }\n\n    // user really wants this event\n    if(likely(__do_log_file_event(t, close_ev, O_WRONLY, &name_snapshot.name, false,\n                        d_ent, &t->event_consumer.w_last_written_path))){\n        t->w_event_count++;\n    }\n\nout_release:\n    kutil_release_name_snapshot(&name_snapshot);\n}\n\n////////////////////////////////////////////////////////////////////////////\n\nlong event_consumer_init(struct event_consumer* consumer){\n    memset(consumer, 0, sizeof (struct event_consumer));\n\n    // To avoid alignment of struct 
close_event to buffer size,\n    // we simply allocate a little more space, so we do not\n    // overflow right before the ring-buffer wrap-around.\n    consumer->circ_buf.buf = kvzalloc(CONSUMER_CIRC_BUFSIZE + sizeof (struct close_event),\n                                      SHOURNALK_GFP | __GFP_RETRY_MAYFAIL);\n    if(! consumer->circ_buf.buf)\n        return -ENOMEM;\n    consumer->w_cache = kvzalloc(sizeof (struct consumer_cache),\n                                 SHOURNALK_GFP | __GFP_RETRY_MAYFAIL);\n    if(! consumer->w_cache)\n        goto err1;\n\n    consumer->r_cache = kvzalloc(sizeof (struct consumer_cache),\n                                 SHOURNALK_GFP | __GFP_RETRY_MAYFAIL);\n    if(! consumer->r_cache)\n        goto err2;\n\n    consumer->circ_buf_size = CONSUMER_CIRC_BUFSIZE;\n    spin_lock_init(&consumer->queue_lock);\n    sema_init(&consumer->start_sema, 0);\n\n    consumer_cache_init(consumer->w_cache);\n    consumer_cache_init(consumer->r_cache);\n\n\n    return 0;\n\nerr2:\n    kvfree(consumer->w_cache);\nerr1:\n    kvfree(consumer->circ_buf.buf);\n    return -ENOMEM;\n}\n\nvoid event_consumer_cleanup(struct event_consumer* c){\n    if(! 
IS_ERR_OR_NULL(c->consume_task)){\n        put_task_struct(c->consume_task);\n    }\n\n    kvfree(c->r_cache);\n    kvfree(c->w_cache);\n    kvfree(c->circ_buf.buf);\n}\n\n\nlong event_consumer_thread_create(struct event_target* event_target,\n                        const char* thread_name){\n    struct event_consumer* consumer = &event_target->event_consumer;\n    consumer->consume_task = kthread_create(event_queue_consume_thread,\n                                            event_target,\n                                            \"%s\", thread_name);\n\n    if(IS_ERR(consumer->consume_task)){\n        pr_warn(\"Failed to create consume thread %s - %ld\\n\", thread_name,\n                PTR_ERR(consumer->consume_task));\n        return PTR_ERR(consumer->consume_task);\n    }\n    get_task_struct(consumer->consume_task);\n\n    wake_up_process(consumer->consume_task);\n\n    // see documentation of kthread_stop (linux 4.19):\n    // if kthread is stopped very early,\n    // the threadfn might *never* be called. Here this might happen\n    // during the observation of short-lived processes.\n    // We must however make sure, it runs at least once, as\n    // events might be pending. So wait, until our thread calls \"up\".\n    down(&consumer->start_sema);\n\n    return 0;\n}\n\nvoid event_consumer_thread_setup(struct event_target* event_target){\n    struct event_consumer* consumer = &event_target->event_consumer;\n\n    if(event_target->mm) {\n#ifdef USE_MM_SET_FS_OFF\n        consumer->consume_tsk_oldfs = get_fs();\n        set_fs(USER_DS);\n#endif\n        kutil_use_mm(event_target->mm);\n    }\n\n    // use_mm() -> We want this kthread's page-cache memory allocations to account to\n    // the callers memcg. At least on linux 4.19 and ext4 using the mm of the\n    // caller should suffice. See also below stacktrace, which shows how\n    // exactly the memcg is used. 
Note however that mm->owner might be set to null,\n    // in case our parent process exits or execs, so we will only keep on logging file\n    // events, if current->mm->owner == event_target->caller_tsk. See\n    // also exit.c:mm_update_next_owner\n    //\n    // # ext4 mem_cgroup charging\n    // First the mem_cgroup is associated with a page:\n    // (some parts of the stacktrace were omitted for better readability).\n    //\n    // ext4_da_write_begin\n    // grab_cache_page_write_begin\n    //     pagecache_get_page\n    //         __alloc_pages_nodemask\n    //         add_to_page_cache_lru\n    //         __add_to_page_cache_locked\n    //             mem_cgroup_try_charge(current->mm) <-- !\n    //                 get_mem_cgroup_from_mm         <-- owner != null for correct accounting\n    //                 try_charge\n    //                 memcgroup_commit_charge: page->mem_cgroup = memcg; <-- !\n    //\n    // Then the mem_cgroup is taken later\n    //\n    // ext4_block_write_begin\n    //     create_empty_buffers\n    //         alloc_page_buffers ( in fs/buffer.c, uses __GFP_ACCOUNT!)\n    //             get_mem_cgroup_from_page: memcg = page->mem_cgroup;   <-- !\n    //             alloc_buffer_head\n    //                 kmem_cache_alloc\n\n\n    // set_user_nice(current, 1); // 2? 10? MAX_NICE?\n    // Only affects reads. 
See also: https://unix.stackexchange.com/a/480863/288001\n    set_task_ioprio(current, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 6));\n\n    // process events with user credentials\n    consumer->consume_task_orig_cred = override_creds(event_target->cred);\n}\n\nvoid event_consumer_thread_cleanup(struct event_target* event_target){\n    struct event_consumer* consumer = &event_target->event_consumer;\n\n    kutil_WARN_ONCE_IFN_DBG(current != consumer->consume_task,\n                            \"current != consumer->consume_task\");\n    revert_creds(consumer->consume_task_orig_cred);\n    if(event_target->mm){\n        kutil_unuse_mm(event_target->mm);\n#ifdef USE_MM_SET_FS_OFF\n        set_fs(consumer->consume_tsk_oldfs);\n#endif\n    }\n}\n\nvoid event_consumer_thread_stop(struct event_consumer* consumer){\n    int ret;\n    if( (ret = kthread_stop(consumer->consume_task))){\n        kutil_WARN_ONCE_IFN_DBG(1, \"event-consume-thread returned %d\\n\", ret);\n    }\n}\n\nbool event_consumer_flush_target_file_safe(struct event_target *t)\n{\n    ssize_t ret;\n    if(unlikely((ret = shournal_kio_flush(t->file)) < 0)){\n        WRITE_ONCE(t->ERROR, true);\n        pr_debug(\"Failed to flush event target file %s, returned %ld\\n\",\n                 t->file_init_path, ret);\n        event_target_write_result_to_user_ONCE(t, (int)-ret);\n        return false;\n    }\n    return true;\n}\n\n\nvoid close_event_consume(struct event_target* event_target, struct close_event* close_ev){\n    bool may_read;\n    bool may_write;\n\n    if(unlikely(event_target->ERROR)){\n        goto out;\n    }\n    if(unlikely(! close_ev->path.dentry->d_inode)){\n        pr_devel(\"ignore event, inode is NULL.\\n\");\n        goto out;\n    }\n    may_read =  kutil_inode_permission(&close_ev->path, MAY_READ) == 0;\n    if(unlikely(! may_read)){\n        // maybe the file event came from a setuid-program? Otherwise, the file\n        // might be writable, but not readable. 
We ignore this special\n        // case here, because we cannot (securely) hash it anyway.\n        __dbg_print_event(event_target, close_ev, \"may_read is false\");\n        goto out;\n    }\n    // __dbg_print_event(event_target, close_event, \"test\");\n    may_write = kutil_inode_permission(&close_ev->path, MAY_WRITE) == 0;\n\n    // Just as fanotify does, we consider O_RDWR only\n    // as write-event.\n    // maybe_todo: differentiate?\n    if(close_ev->f_mode & FMODE_WRITE){\n        __handle_write_event(event_target, close_ev, may_write);\n    } else {\n        __handle_read_event(event_target, close_ev, may_write);\n    }\n\nout:\n    close_event_cleanup(close_ev);\n}\n\n\nvoid close_event_cleanup(struct close_event* event){\n    dput(event->path.dentry);\n    mntput(event->path.mnt);\n}\n\n\n"
  },
  {
    "path": "kernel/event_consumer.h",
    "content": "#pragma once\n\n#include \"shournalk_global.h\"\n\n#include <linux/path.h>\n#include <linux/types.h>\n#include <linux/fs.h>\n#include <linux/spinlock.h>\n#include <linux/semaphore.h>\n#include <linux/limits.h>\n#include <linux/timer.h>\n#include <linux/compiler.h>\n#include <linux/circ_buf.h>\n\n#include \"kutil.h\"\n\nstruct event_target;\nstruct consumer_cache;\n\nstruct close_event {\n    struct path path;\n    fmode_t f_mode;\n};\n\nstruct event_consumer {\n    struct circ_buf circ_buf;\n    struct spinlock queue_lock;\n    int circ_buf_size;\n    bool woken_up;\n    struct task_struct* consume_task;\n\n    const struct cred* consume_task_orig_cred;\n    struct consumer_cache* w_cache;\n    struct path w_last_written_path; /* last logged full path */\n    struct consumer_cache* r_cache;\n    struct path r_last_written_path;\n    struct semaphore start_sema;\n#ifdef USE_MM_SET_FS_OFF\n       mm_segment_t consume_tsk_oldfs;\n#endif\n\n};\n\n\nlong event_consumer_init(struct event_consumer*);\nvoid event_consumer_cleanup(struct event_consumer*);\n\nlong event_consumer_thread_create(struct event_target* event_target,\n                        const char* thread_name);\nvoid event_consumer_thread_setup(struct event_target* event_target);\nvoid event_consumer_thread_cleanup(struct event_target* event_target);\n\nvoid event_consumer_thread_stop(struct event_consumer* consumer);\n\nbool event_consumer_flush_target_file_safe(struct event_target*);\n\n\nvoid close_event_consume(struct event_target*, struct close_event*);\nvoid close_event_cleanup(struct close_event* event);\n\n\n"
  },
  {
    "path": "kernel/event_consumer_cache.c",
    "content": "\n#include <linux/jiffies.h>\n#include <linux/dcache.h>\n\n#include \"event_consumer_cache.h\"\n#include \"kutil.h\"\n\n// stolen from fs/proc/base.c:do_proc_readlink\nstatic inline size_t\nd_path_len(const char* buf, size_t buflen, const char* pathname){\n    return buf + buflen - 1 - pathname;\n}\n\nstatic void __cache_entry_init(struct consumer_cache_entry* e){\n    e->dir.mnt = NULL;\n    e->dir.dentry = NULL;\n    e->dirname = NULL;\n    e->dirname_len = 0;\n    e->flags = 0;\n    e->__cache_invalid_jiffy = 0;\n}\n\nstatic bool __cache_entry_hit(const struct consumer_cache_entry*e,\n                              const struct vfsmount *mnt,\n                              const struct dentry *dentry){\n    return  dentry == e->dir.dentry  &&\n            mnt    == e->dir.mnt     &&\n            time_is_after_jiffies(e->__cache_invalid_jiffy);\n\n}\n\n\nstatic bool __append_dname_to_parent(struct consumer_cache_entry* parent,\n                                     struct qstr* dname ){\n    if(parent->dirname_len + dname->len >= sizeof(parent->__dirname_buf)){\n        pr_devel(\"path-buffer too small for %s/%s\",\n                 parent->dirname, dname->name);\n        return false;\n    }\n    if(parent->dirname != parent->__dirname_buf){\n        // d_path prepends backwards, to make sure we have the full buffer-length\n        // move to the front of our buffer\n        memmove(parent->__dirname_buf, parent->dirname, parent->dirname_len + 1);\n        parent->dirname = parent->__dirname_buf;\n    }\n\n    if(parent->dirname_len > 1){\n        // not the root node\n        parent->dirname[parent->dirname_len] = '/';\n        parent->dirname_len++;\n    }\n    memcpy(parent->dirname + parent->dirname_len,\n            (const char*)dname->name, dname->len + 1);\n    parent->dirname_len += dname->len;\n\n    return true;\n}\n\n\nvoid consumer_cache_init(struct consumer_cache* c){\n    __cache_entry_init(&c->_last_entry);\n}\n\n\n/// Try to find 
cached meta-data for the given directory\n/// @param existed: set to true, if existed\n/// @return the found or new entry or an ERROR_PTR on err. Note that\n/// in rare cases the corresponding directory-path may be *wrong*, because\n/// currently no reference on struct path is held!\nstruct consumer_cache_entry* consumer_cache_find(\n        struct consumer_cache* c, struct vfsmount *mnt, struct dentry *dentry,\n        bool* existed){\n    // dentry is initialized NULL, so on first call we never return true\n    struct consumer_cache_entry* e = &c->_last_entry;\n    struct dentry* dparent;\n\n    if(__cache_entry_hit(e, mnt, dentry)){\n        *existed = true;\n        return e;\n    }\n    dparent = READ_ONCE(dentry->d_parent);\n\n    if(__cache_entry_hit(e, mnt, dparent)){\n        bool append_success;\n        struct kutil_name_snapshot name_snapshot;\n        kutil_take_name_snapshot(&name_snapshot, dentry);\n        append_success = __append_dname_to_parent(e, &name_snapshot.name);\n        kutil_release_name_snapshot(&name_snapshot);\n        if(unlikely(! append_success)){\n            return ERR_PTR(-EDOM);\n        }\n        e->dir.dentry = dentry;\n        // for now, set existed to false, because we don't know\n        // whether child is e.g. 
an exclude-dir, if parent was so.\n        *existed = false;\n        return e;\n    }\n\n    *existed = false;\n\n    // maybe_todo: hold a path_get reference for correctness (implications?)?\n    e->dir.mnt = mnt;\n    e->dir.dentry = dentry;\n    e->dirname = d_path(&e->dir, e->__dirname_buf, PATH_MAX);\n    if (IS_ERR(e->dirname)) {\n        e->dir.dentry = NULL;\n        pr_devel(\"failed to resolve pathname\\n\");\n        // Dbg: print raw path in case d_path fail (why?)\n        // pathname = dentry_path_raw(e->file->f_path.dentry, g_tmp_path, PATH_MAX);\n        return (struct consumer_cache_entry*)e->dirname;\n    }\n    e->dirname_len = (int)(d_path_len(e->__dirname_buf, PATH_MAX, e->dirname));\n    e->__cache_invalid_jiffy = jiffies + msecs_to_jiffies(5000);\n\n    return e;\n}\n\n"
  },
  {
    "path": "kernel/event_consumer_cache.h",
    "content": "/* Cache d_names and settings (in flags) for\n * the given struct path. Note that in rare cases\n * wrong (older) paths may be returned:\n * 1. Currently we hold no reference on the struct path. If the\n *    memory address is reused, we return the old path.\n * 2. On a hit, we do not resolve the path again, which might\n *    have changed meanwhile.\n * However, the cache is invalidated after a short time and\n * at least the filename is always correct.\n *\n */\n\n#pragma once\n\n#include \"shournalk_global.h\"\n\n#include <linux/path.h>\n#include <linux/limits.h>\n\n// For consumer_cache_entry.flags\nenum\n{\n    DIRCACHE_W_OFF      = 1 << 0,\n    DIRCACHE_R_OFF      = 1 << 1,\n    DIRCACHE_SCRIPT_OFF = 1 << 2,\n};\n\n\nstruct consumer_cache_entry {\n    struct path dir; /* WARNING - do not dereference */\n    char* dirname;\n    int dirname_len;\n    int flags; // e.g. DIRCACHE_W_OFF\n    unsigned long __cache_invalid_jiffy;\n    char __dirname_buf[PATH_MAX];\n};\n\nstruct consumer_cache {\n    struct consumer_cache_entry _last_entry;\n};\n\n\nvoid consumer_cache_init(struct consumer_cache*);\n\nstruct consumer_cache_entry* consumer_cache_find(\n        struct consumer_cache*, struct vfsmount*, struct dentry*,\n        bool* existed);\n\n"
  },
  {
    "path": "kernel/event_handler.c",
    "content": "\n#include <linux/hashtable.h>\n#include <linux/slab.h>\n#include <linux/pid.h>\n#include <linux/file.h>\n#include <linux/fdtable.h>\n#include <linux/delay.h>\n#include <linux/thread_info.h>\n#include <linux/interrupt.h>\n\n\n#include \"event_handler.h\"\n#include \"event_target.h\"\n#include \"event_queue.h\"\n#include \"kutil.h\"\n#include \"tracepoint_helper.h\"\n\n\nstruct task_entry {\n     struct task_struct* tsk;\n     struct event_target* event_target; // here file events are written into\n     struct hlist_node node ;\n     struct rcu_work destroy_rwork;\n} ;\nstatic DEFINE_HASHTABLE(task_table, 16);\nstatic struct kmem_cache * __task_entry_cache;\n\nstatic DEFINE_SPINLOCK(task_table_lock);\nstatic struct workqueue_struct* del_taskentries_wq = NULL;\n\nstatic struct task_entry* __task_entry_alloc(void){\n    return kmem_cache_alloc(__task_entry_cache,\n                            GFP_NOWAIT | __GFP_ACCOUNT | __GFP_NOWARN);\n}\n\n/// Warning: May sleep!\nstatic void\n__task_entry_destroy(struct task_entry* e){\n    event_target_put(e->event_target);\n    kmem_cache_free(__task_entry_cache ,e);\n}\n\nstatic void __task_entry_destroy_work(struct work_struct *work){\n    struct task_entry* el = container_of(to_rcu_work(work),\n                    struct task_entry, destroy_rwork);\n    __task_entry_destroy(el);\n}\n\n\nstatic inline u32\n__task_hash(struct task_struct* task) {\n    return (u32)(long)task;\n}\n\n\nstatic inline struct task_entry*\n__find_task_entry(struct task_struct* task,\n                            u32 task_hash){\n    struct task_entry* el;\n    hash_for_each_possible_rcu(task_table, el, node, task_hash) {\n        if(el->tsk == task){\n            return el;\n        }\n    }\n    return NULL;\n}\n\n\n/// find and get a reference from task table under rcu_lock\nstatic inline __attribute__((__warn_unused_result__))\nstruct event_target*\n__find_get_event_target_safe(struct task_struct* task){\n    struct task_entry* el;\n   
 struct event_target* event_target;\n    u32 t_hash;\n    t_hash = __task_hash(task);\n    rcu_read_lock();\n    if((el = __find_task_entry(task, t_hash)) == NULL){\n        rcu_read_unlock();\n        return NULL;\n    }\n\n    event_target = event_target_get(el->event_target);\n    rcu_read_unlock();\n    return event_target;\n}\n\n/// Called when we stop observing the task set in the event_target's\n/// exit_tsk, either because it exited or it was unmarked for observation.\nstatic inline void\n__handle_exit_tsk_remove(struct task_struct *task, bool in_exit,\n                              struct event_target* event_target){\n    if(in_exit){\n        // see kernel/exit.c: the lower 8 bits are shifted.\n        //     do_exit((error_code&0xff)<<8);\n        // Undo that:\n        unsigned lower_exit_code = (task->exit_code >> 8) & 0xff;\n        smp_store_mb(event_target->exit_code, lower_exit_code);\n        pr_devel(\"event_target caller %d: parent task %d exited with %d\\n\",\n                 event_target->caller_tsk->pid, task->pid, lower_exit_code);\n    } else {\n        WRITE_ONCE(event_target->exit_tsk, NULL);\n        pr_debug(\"exit_tsk pid %d unset for \"\n                 \"caller %d\\n\", task->pid,\n                 event_target->caller_tsk->pid);\n    }\n\n}\n\n\n/// Insert the given task into the table. If the task exists\n/// and param update_if_exist is true, the event_target is updated, else\n/// -EEXIST is returned. 
On success, the target's ref-counter is incremented.\nstatic long\n__insert_task_into_table_safe(struct task_struct *task,\n                              struct event_target* target,\n                              bool update_if_exist){\n    struct task_entry* el;\n    u32 t_hash;\n    long ret = 0;\n    struct event_target* old_target;\n    t_hash = __task_hash(task);\n\n    rcu_read_lock();\n    // create-if-not-exist in same lock!\n    spin_lock(&task_table_lock);\n    if( likely((el=__find_task_entry(task, t_hash)) == NULL)) {\n        // Whoever is interested in the events, pays for the allocation.\n        struct mem_cgroup * oldcg;\n        oldcg = kutil_set_active_memcg(target->memcg);\n        el = __task_entry_alloc();\n        kutil_set_active_memcg(oldcg);\n        if(! el){\n            ret = -ENOMEM;\n            goto out_unlock;\n        }\n        el->tsk = task;\n        el->event_target = event_target_get(target);\n        hash_add_rcu(task_table, &el->node, t_hash);\n\n        goto out_unlock;\n    }\n    // target exists. Fail, if update not allowed\n    if(! 
update_if_exist){\n        ret = -EEXIST;\n        goto out_unlock;\n    }\n    old_target = el->event_target;\n    // first increment, then decrement ref-counter\n    // in case old_target == new_target.\n    el->event_target = event_target_get(target);\n    if(old_target != target){\n        pr_debug(\"pid %d: event_target \"\n                 \"caller changed from %d to %d\\n\", task->pid,\n                 old_target->caller_tsk->pid,\n                 target->caller_tsk->pid);\n        if(unlikely(old_target->exit_tsk == task)){\n            __handle_exit_tsk_remove(task, false, old_target);\n        }\n    }\n\n    spin_unlock(&task_table_lock);\n    rcu_read_unlock();\n\n    // might_sleep, so put outside of lock\n    event_target_put(old_target);\n\n    return ret;\n\nout_unlock:\n    spin_unlock(&task_table_lock);\n    rcu_read_unlock();\n    return ret;\n}\n\n\nstatic bool\n__remove_task_from_table_safe(struct task_struct *task, bool in_exit){\n    struct task_entry* el;\n    bool removed = false;\n    u32 t_hash;\n\n    t_hash = __task_hash(task);\n    rcu_read_lock();\n    if((el = __find_task_entry(task, t_hash)) != NULL){\n        spin_lock(&task_table_lock);\n        hash_del_rcu(&el->node);\n        // Maybe it is safe to concurrently INIT_RCU_WORK outside the spinlock.\n        // But better safe than sorry.\n        INIT_RCU_WORK(&el->destroy_rwork, __task_entry_destroy_work);\n        spin_unlock(&task_table_lock);\n\n        if(unlikely(el->event_target->exit_tsk == task)){\n            __handle_exit_tsk_remove(task, in_exit, el->event_target);\n        }\n        // free later\n        queue_rcu_work(del_taskentries_wq, &el->destroy_rwork);\n        removed = true;\n        // pr_devel(\"stop observing pid %d, init event path %s, caller %d\\n\",\n        //           task->pid, el->event_target->file_init_path,\n        //           el->event_target->caller_pid);\n    }\n    rcu_read_unlock();\n\n    return removed;\n}\n\n\nstatic inline bool 
__fput_is_interesting(const struct file* file,\n                                         const struct event_target* t){\n    bool w_enable;\n    bool r_enable;\n\n    w_enable = READ_ONCE(t->w_enable);\n    r_enable = READ_ONCE(t->r_enable);\n\n    return (w_enable && file->f_mode & FMODE_WRITE) ||\n           (r_enable && file->f_mode & FMODE_READ);\n}\n\n\n\n/// Check if @param task has same owner as current process\n/// stolen from kernel/sched/core.c\nstatic bool __task_check_same_owner(struct task_struct *task)\n{\n    const struct cred *cred_me = current_cred();\n    const struct cred *cred_task;\n    bool match;\n\n    rcu_read_lock();\n    cred_task = __task_cred(task);\n    match = (uid_eq(cred_me->euid, cred_task->euid) ||\n         uid_eq(cred_me->euid, cred_task->uid));\n    rcu_read_unlock();\n    return match;\n}\n\nstatic struct task_struct* __get_task_if_allowed(pid_t pid){\n    struct task_struct* tsk;\n\n    rcu_read_lock();\n    tsk = get_pid_task(find_vpid(pid), PIDTYPE_PID);\n    rcu_read_unlock();\n\n    if( IS_ERR_OR_NULL(tsk)){\n        // no such process in current pid-namespace\n        pr_devel(\"pid %d does not exist in current pid namespace\", pid);\n        return ERR_PTR(-ESRCH);\n    }\n    if(! __task_check_same_owner(tsk)){\n        pr_devel(\"pid %d has different owner\", tsk->pid);\n        put_task_struct(tsk);\n        return ERR_PTR(-EPERM);\n    }\n    return tsk;\n}\n\n\nint event_handler_constructor(void) {    \n    del_taskentries_wq = system_long_wq;\n    hash_init(task_table);\n\n    __task_entry_cache = KMEM_CACHE(task_entry, 0);\n    if(! 
__task_entry_cache)\n        return -ENOMEM;\n\n\n\n    return 0;\n}\n\n\nvoid event_handler_destructor(void)\n{\n    u32 bucket;\n    struct task_entry* el;\n    struct hlist_node *temp_node;\n\n    // First wait for all call_rcu() (called in queue_rcu_work) to\n    // complete using rcu_barrier(), then flush the used workqueue.\n    // Note that we are the only thread with access to the task_table,\n    // since sysfs and tracepoints were already disabled. When this\n    // function returns, all event_targets should have been freed.\n    // See also Documentation/RCU/rcubarrier.txt: synchronize_rcu() is\n    // *not* sufficient! We have to wait \"for all outstanding RCU\n    // callbacks to complete\".\n    rcu_barrier();\n    flush_workqueue(del_taskentries_wq);\n\n    hash_for_each_safe(task_table, bucket, temp_node, el, node) {\n        hash_del(&el->node);\n        __task_entry_destroy(el);\n    }\n    kmem_cache_destroy(__task_entry_cache);\n}\n\nstruct event_target*\nget_event_target_from_pid(pid_t pid){\n    struct task_struct* task;\n    struct event_target* event_target;\n    task = __get_task_if_allowed(pid);\n    if(IS_ERR(task)){\n        return (struct event_target*)task;\n    }\n    event_target = __find_get_event_target_safe(task);\n    if(unlikely(event_target == NULL)){\n        event_target = ERR_PTR(-ENXIO);\n    }\n    put_task_struct(task);\n    return event_target;\n}\n\n/// Register param event_target as target for file events for the\n/// given pid. The respective task must be running and\n/// the caller must have the necessary capabilities. 
If the process is\n/// already observed by another event_target we silently replace it\n/// with the new one.\n/// @param collect_exitcode: if set to true, set this task as the\n/// \"exit_tsk\" for which to collect the exit code\nlong event_handler_add_pid(struct event_target* event_target, pid_t pid,\n                           bool collect_exitcode){\n    struct task_struct* task;\n    long ret = 0;\n\n    task = __get_task_if_allowed(pid);\n    if(IS_ERR(task)){\n        return PTR_ERR(task);\n    }\n    ret = __insert_task_into_table_safe(task, event_target, true);\n\n    // We may have been asked to trace a task which is just about to exit and there is a\n    // small timeslot, where the task is still there but has already called our traced\n    // function cgroup_exit. In this case, we have just created a stale event_target\n    // reference. Look at kernel/exit.c::do_exit. do_exit sets the PF_EXITING flag before\n    // calling cgroup_exit, so below code is fine.\n    if (unlikely(READ_ONCE(task->flags) & PF_EXITING)) {\n        pr_debug(\"just marked an exiting task. Removing it again\");\n        __remove_task_from_table_safe(task, false);\n    } else if(collect_exitcode && ret == 0){\n        WRITE_ONCE(event_target->exit_tsk, task);\n    }\n\n    put_task_struct(task);\n    return ret;\n}\n\nlong event_handler_remove_pid(pid_t pid){\n    struct task_struct* task;\n    long ret = 0;\n\n    task = __get_task_if_allowed(pid);\n    if(IS_ERR(task)){\n        return PTR_ERR(task);\n    }\n    if(! 
__remove_task_from_table_safe(task, false)){\n        ret = -ESRCH;\n    }\n    put_task_struct(task);\n    return ret;\n}\n\n\n/// If the current task shall be observed,\n/// enqueue the file event for later processing.\n/// Endless recursion is avoided by\n/// using the flag FMODE_NONOTIFY and by observing\n/// only regular files (so the target pipe does no harm\n/// as well).\nvoid event_handler_fput(unsigned long ip __attribute__ ((unused)),\n                        unsigned long parent_ip __attribute__ ((unused)),\n                        struct ftrace_ops *op __attribute__ ((unused)),\n                        struct pt_regs *regs)\n{\n    struct event_target* event_target;\n    struct file* file;\n\n    if(unlikely(current->flags & PF_KTHREAD))\n        return;\n\n    file = (struct file*)(kutil_get_first_arg_from_reg(tracepoint_helper_get_ftrace_regs(regs)));\n\n    // Ideally we would ftrace fsnotify_close which is, however, inlined\n    // (thus cannot be traced).\n    // Below code is partially duplicated from there.\n    if (file->f_mode & FMODE_NONOTIFY ||\n        // maybe_todo: check file_inode(file) == NULL ifndef FMODE_OPENED\n        !S_ISREG(file_inode(file)->i_mode)\n                     )\n        return;\n\n    // ftrace doc recommends to check this, however, __fput() calls dput() which\n    // does rcu_read_lock() itself, so we should be safe.\n    // if(! rcu_is_watching()) return;\n\n    kutil_WARN_DBG(atomic_read(&file_inode(file)->i_count) < 1,\n                   \"file_inode(file)->i_count < 1\");\n\n    if((event_target = __find_get_event_target_safe(current)) == NULL ){\n        return;\n    }\n    if( unlikely(! 
__fput_is_interesting(file, event_target)))\n        goto out_put;\n\n    // event_target ownership transferred to queue!\n    event_queue_add(event_target, file);\n\n    return;\n\nout_put:\n    // Might sleep!\n    event_target_put(event_target);\n}\n\n\nvoid event_handler_process_exit(struct task_struct *task)\n{\n    if(unlikely(current->flags & PF_KTHREAD))\n        return;\n\n    if (unlikely(!rcu_is_watching())){\n        kutil_WARN_DBG(1, \"called without rcu\");\n        return;\n    }\n    __remove_task_from_table_safe(task, true);\n}\n\n\n/// If the parent task is observed and the child task is in\n/// the same pid namespace, also add the child task to our\n/// task_table\nvoid event_handler_process_fork(struct task_struct *parent,\n                                  struct task_struct *child){\n    struct event_target* target;\n    long ret;\n\n    if(unlikely(current->flags & PF_KTHREAD))\n        return;\n\n    if (unlikely(!rcu_is_watching())){\n        kutil_WARN_DBG(1, \"called without rcu\");\n        return;\n    }\n\n    if((target = __find_get_event_target_safe(parent)) == NULL ){\n        // parent not observed -> ignore child too\n        return;\n    }\n    if(unlikely(READ_ONCE(target->ERROR))){\n        pr_devel(\"Ignore fork of pid %d. Error flag \"\n                 \"of event target is set.\", parent->pid);\n        goto put_out;\n    }\n    if(unlikely(task_active_pid_ns(child) != target->pid_ns)){\n        pr_devel(\"pid namespace does not match \"\n                 \"to parent with pid %d. Ignore.\\n\", parent->pid);\n        goto put_out;\n    }\n\n    if(unlikely((ret = __insert_task_into_table_safe(child, target, false)))){\n        // fixme: set some flag in this event_target.\n        pr_debug(\"failed to observe child process: %ld\", ret);\n        goto put_out;\n    }\n\nput_out:\n    event_target_put(target);\n}\n\n\n\n"
  },
  {
    "path": "kernel/event_handler.h",
    "content": "#pragma once\n\n#include \"shournalk_global.h\"\n\n#include <linux/types.h>\n\nstruct event_target;\nstruct ftrace_ops;\nstruct pt_regs;\n\nint  event_handler_constructor(void);\nvoid event_handler_destructor(void);\n\nstruct event_target*\nget_event_target_from_pid(pid_t pid);\nlong event_handler_add_pid(struct event_target*, pid_t, bool collect_exitcode);\nlong event_handler_remove_pid(pid_t pid);\n\nnoinline notrace\nvoid event_handler_fput(unsigned long, unsigned long,\n                        struct ftrace_ops*, struct pt_regs*);\n\nvoid event_handler_process_exit(struct task_struct *task);\n\nvoid event_handler_process_fork(struct task_struct *parent,\n                                  struct task_struct *child);\n\n"
  },
  {
    "path": "kernel/event_queue.c",
    "content": "\n#include <linux/circ_buf.h>\n#include <linux/compiler.h>\n#include <linux/slab.h>\n#include <linux/sched.h>\n#include <linux/file.h>\n#include <linux/cred.h>\n#include <linux/kthread.h>\n#include <linux/timer.h>\n#include <linux/random.h>\n#include <linux/delay.h>\n#include <linux/ioprio.h>\n#include <linux/fs.h>\n#include <linux/kthread.h>\n#include <linux/mmu_context.h>\n#include <linux/memcontrol.h>\n#include <linux/mm_types.h>\n\n\n#include \"event_queue.h\"\n#include \"event_consumer.h\"\n#include \"shournal_kio.h\"\n#include \"kutil.h\"\n\n#define __CONSUMER_JIFFY_OFFSET 200\n\n\n/// \"Consumes\" the ringbuffer (writes tail)\n/// @return the number of consumed bytes (*not* events).\nstatic int\n__consume_close_events(struct event_target* event_target){\n    int bytes;\n    int bytes_total;\n    int head, tail;\n    struct circ_buf* circ_buf = &event_target->event_consumer.circ_buf;\n    const int cir_buf_size = event_target->event_consumer.circ_buf_size;\n    struct close_event* e;\n    unsigned long next_sched_jiffy;\n\n    head = smp_load_acquire(&circ_buf->head);\n    tail = READ_ONCE(circ_buf->tail);\n    bytes_total = CIRC_CNT(head, tail, cir_buf_size);\n\n    next_sched_jiffy =  jiffies + msecs_to_jiffies(__CONSUMER_JIFFY_OFFSET);\n    for(bytes=0; bytes < bytes_total; ){\n        e = (struct close_event*)&circ_buf->buf[tail];\n        tail = (tail + sizeof (struct close_event)) & (cir_buf_size - 1);\n        bytes += sizeof(struct close_event);\n\n        close_event_consume(event_target, e);\n        if(time_is_before_jiffies(next_sched_jiffy)){\n            smp_store_release(&circ_buf->tail, tail);\n            kutil_kthread_be_nice();\n            next_sched_jiffy =  jiffies + msecs_to_jiffies(__CONSUMER_JIFFY_OFFSET);\n        }\n    }\n    // maybe_todo: move into loop to avoid event-overflow?\n    smp_store_release(&circ_buf->tail, tail);\n\n    if( bytes_total > 0){\n        // bulk refcount-decrement..\n        int event_count 
= bytes_total/sizeof(struct close_event);\n        event_target->consumed_event_count += event_count;\n        if(kuref_sub_and_test(event_count, &event_target->_f_count)){\n            __event_target_put(event_target);\n        }\n    }\n    return bytes_total;\n}\n\n\n/// For each event_target one thread is created, which\n/// consumes the events of the target's ringbuffer.\nint event_queue_consume_thread(void* data){\n    struct event_target* event_target = (struct event_target*)data;\n    struct event_consumer* consumer = &event_target->event_consumer;\n    struct kbuffered_file* target_file = event_target->file;\n    uint64_t schedcount = 0;\n    int sleep_counter = 0;\n\n    up(&consumer->start_sema);\n\n    event_consumer_thread_setup(event_target);\n\n    // lost wake-up problem.\n    set_current_state(TASK_INTERRUPTIBLE);\n\n    // Calling wake_up_process is costly, so we want the producer\n    // to do so rarely. Therefore:\n    // First loop until nothing can be consumed.\n    // Then sleep and check again for new events, before scheduling\n    // without timeout.\n    while(!kthread_should_stop()){\n        int consumed_bytes = __consume_close_events(event_target);\n        if(consumed_bytes){\n            sleep_counter = 0;\n        } else {\n            // maybe a good time to flush?\n            if(target_file->__pos > target_file->__bufsize/4){\n                event_consumer_flush_target_file_safe(event_target);\n                // this might have taken a while, so..\n                kutil_kthread_be_nice();\n                continue;\n            }\n\n            if(sleep_counter > 2){\n                smp_store_mb(event_target->event_consumer.woken_up, false);\n                // By checking again for events we allow a\n                // harmless race in the consumer.\n                __consume_close_events(event_target);\n                schedcount++;\n                schedule();\n            } else {\n                schedule_timeout(1);\n     
           sleep_counter++;\n            }\n            set_current_state(TASK_INTERRUPTIBLE);\n        }\n\n    }\n\n    // consume final remaining events. Note in case *this* kthread\n    // puts the final event_target-ref, we never get here.\n    __consume_close_events(event_target);\n    event_consumer_thread_cleanup(event_target);\n    kutil_kthread_exit(NULL, 0);\n    return 0;\n}\n\n\n"
  },
  {
    "path": "kernel/event_queue.h",
    "content": "/* File events are stored into a ringbuffer\n * and consumed in a per-event_target-thread.\n */\n\n#pragma once\n\n#include \"shournalk_global.h\"\n\n#include <linux/mount.h>\n\n#include \"event_target.h\"\n#include \"event_consumer.h\"\n\n\nint event_queue_consume_thread(void *data);\n\n/// Threadsafe enqueue the close event and wake up\n/// the consumer. This function consumes one event_target-reference (passes\n/// it to the consumer or puts it in case of overflow)!\nstatic inline void event_queue_add(struct event_target* event_target, struct file* file){\n    int head;\n    int tail;\n    int remaining_bytes;\n    struct close_event* close_ev;\n    struct event_consumer* consumer = &event_target->event_consumer;\n    struct circ_buf* circ_buf = &consumer->circ_buf;\n\n    // Be optimistic, that we have space in the ringbuf. We\n    // *must* get the refs before enqueuing, otherwise\n    // the consumer might put the last ones before us!\n    mntget(file->f_path.mnt);\n    dget(file->f_path.dentry);\n\n    // No need to ihold(dentry->d_inode)\n    // \"as long as a counted reference is held to a dentry,\n    //  a non-NULL ->d_inode value will never be changed.\"\n    // See also: kernel.org/doc/html/latest/filesystems/path-lookup.html\n\n    spin_lock(&consumer->queue_lock);\n    head = READ_ONCE(circ_buf->head);\n    tail = READ_ONCE(circ_buf->tail);\n    remaining_bytes = CIRC_SPACE(head ,tail ,consumer->circ_buf_size);\n\n    if (unlikely(remaining_bytes < (int)sizeof (struct close_event))) {\n        unsigned long long lostcount = READ_ONCE(event_target->lost_event_count);\n        ++lostcount;\n        // Event is lost, consumer was too slow.\n        pr_devel(\"too many file events - skipping some (now lost: %lld)\\n\", lostcount);\n        WRITE_ONCE(event_target->lost_event_count, lostcount);\n        goto overflow_out;\n    }\n    close_ev = (struct close_event*)&circ_buf->buf[head];\n    close_ev->f_mode = file->f_mode;\n    
close_ev->path = file->f_path;\n\n    // write new head *after* having written content:\n    head = (head + sizeof (struct close_event)) & (consumer->circ_buf_size - 1);\n    smp_store_release(&circ_buf->head, head);\n\n    spin_unlock(&consumer->queue_lock);\n\n    // We could simply call wake_up_process all the time, but this\n    // slows down things significantly when many events occur.\n    // Therefore try to only wake the consumer up, if\n    // necessary.\n    // The consumer sets woken_up to false and\n    // afterwards checks for remaining events. Due to this race\n    // it may (rarely) happen that we wake the consumer up\n    // with nothing to do, *but* it can *never* happen\n    // that we produce something which is never consumed.\n    if(READ_ONCE(consumer->woken_up))\n        return;\n\n    smp_store_mb(consumer->woken_up, true);\n    wake_up_process(consumer->consume_task);\n\n    return;\n\noverflow_out:\n    spin_unlock(&consumer->queue_lock);\n    dput(file->f_path.dentry);\n    mntput(file->f_path.mnt);\n    event_target_put(event_target);\n}\n\n"
  },
  {
    "path": "kernel/event_target.c",
    "content": "\n\n#include <linux/hashtable.h>\n#include <linux/memory.h>\n#include <linux/memcontrol.h>\n#include <linux/slab.h>\n#include <linux/fs.h>\n#include <linux/file.h>\n#include <linux/fdtable.h>\n#include <linux/printk.h>\n#include <linux/cred.h>\n#include <linux/pipe_fs_i.h>\n#include <linux/circ_buf.h>\n#include <linux/user_namespace.h>\n\n#include \"event_target.h\"\n#include \"kutil.h\"\n#include \"shournal_kio.h\"\n#include \"shournalk_user.h\"\n#include \"xxhash_shournalk.h\"\n\n\nstatic struct file* __get_check_target_file(int fd){\n    struct file* file;\n    int error_nb = 0;\n\n    file = fget(fd);\n    if (!file){\n        pr_devel(\"fget failed on target file\\n\");\n        error_nb = -EBADF;\n        goto err_cleanup_ret;\n    }\n\n    // We'll write to this file, so make sure we're allowed to\n    if (!(file->f_mode & FMODE_WRITE)) {\n        pr_debug(\"target file not writable\\n\");\n        error_nb = -EPERM;\n        goto err_cleanup_ret;\n    }\n\n    if (! S_ISREG(file_inode(file)->i_mode) ) {\n        pr_debug(\"target file not a regular file\\n\");\n        error_nb = -EBADFD;\n        goto err_cleanup_ret;\n    }\n    return file;\n\n\nerr_cleanup_ret:\n    if(! IS_ERR_OR_NULL(file)){\n        fput(file);\n    }\n    return ERR_PTR(error_nb);\n}\n\n\n/// Get and reopen the pipe passed from userspace.\n/// That way we are independent of the user-space\n/// file status flags and always write with O_NONBLOCK\n/// (a malicious user-space process might otherwise\n/// block us indefinitely).\nstatic struct file* __get_check_pipe(int pipe_fd){\n    struct file* orig_pipe = NULL;\n    struct file* new_pipe = NULL;\n    long error_nb = 0;\n\n    orig_pipe = fget(pipe_fd);\n    if (!orig_pipe){\n        pr_devel(\"fget failed on pipe-fd\\n\");\n        error_nb = -EBADF;\n        goto err_cleanup_ret;\n    }\n    if (! 
S_ISFIFO(file_inode(orig_pipe)->i_mode)) {\n        pr_debug(\"passed fd not a FIFO\\n\");\n        error_nb = -ENOTTY;\n        goto err_cleanup_ret;\n    }\n\n    if (!(orig_pipe->f_mode & FMODE_WRITE)) {\n        pr_debug(\"passed FIFO descriptor is not the write end\\n\");\n        error_nb = -EPERM;\n        goto err_cleanup_ret;\n    }\n\n    // With CONFIG_PROVE_LOCKING, kernel v5.10.191 a spurious \"BUG: Invalid wait context\"\n    // occurred. Apparently, during dentry_open, a mutex is locked, thus previous code\n    // calling dentry_open inside spin_lock(&current->files->file_lock) was buggy. Let's\n    // remember this by calling:\n    might_sleep();\n    // reopen in nonblocking mode\n    new_pipe = dentry_open(&orig_pipe->f_path,\n                           O_WRONLY | O_NONBLOCK,\n                           current->cred);\n    if(!new_pipe){\n        error_nb = -EXDEV;\n        goto err_cleanup_ret;\n    }\n    if(IS_ERR(new_pipe)){\n        error_nb = PTR_ERR(new_pipe);\n        goto err_cleanup_ret;\n    }\n    fput(orig_pipe);\n\n    return new_pipe;\n\n\nerr_cleanup_ret:\n    if(! IS_ERR_OR_NULL(orig_pipe)) fput(orig_pipe);\n    if(! IS_ERR_OR_NULL(new_pipe)) fput(new_pipe);\n\n    return ERR_PTR(error_nb);\n}\n\n\n\nstatic struct event_target*\n__event_target_create(struct file* target_file, struct file* pipe_w,\n                     const struct shournalk_mark_struct * mark_struct){\n    struct event_target* t = NULL;\n    struct kbuffered_file* target_file_buffered = NULL;\n    struct mem_cgroup* memcg = NULL;\n    struct pid_namespace *pid_ns = task_active_pid_ns(current);\n    char* path_tmp;\n    long error = -ENOSYS;\n    struct mm_struct * mm = NULL;\n\n    if(!pid_ns){\n        WARN(1, \"pid_ns == NULL\");\n        return ERR_PTR(-ENXIO);\n    }\n    memcg = get_mem_cgroup_from_mm(current->mm);\n    if(! 
memcg) {\n        WARN(1, \"memcg == NULL\");\n        return ERR_PTR(-ENXIO);\n    }\n\n    mm = get_task_mm(current);\n    if(mm) {\n        mmgrab(mm);\n    } else {\n        pr_debug(\"mm == NULL\"); // does no real harm though\n    }\n\n    t = kvzalloc(sizeof (struct event_target), SHOURNALK_GFP | __GFP_RETRY_MAYFAIL);\n    if(!t) {\n        error = -ENOMEM;\n        goto error_out;\n    }\n\n    t->partial_hash.bufsize = PAGE_SIZE;\n    t->partial_hash.buf = kmalloc(t->partial_hash.bufsize, SHOURNALK_GFP);\n    if(! t->partial_hash.buf){\n        error = -ENOMEM;\n        goto error_out;\n    }\n\n    t->partial_hash.xxh_state = kmalloc(sizeof (struct xxh64_state), SHOURNALK_GFP);\n    if(!t->partial_hash.xxh_state){\n        error = -ENOMEM;\n        goto error_out;\n    }\n\n    path_tmp = d_path(&target_file->f_path,\n                      t->file_init_path,\n                      sizeof (t->file_init_path));\n    if (IS_ERR(path_tmp)) {\n        pr_debug(\"failed to resolve target file pathname\\n\");\n        error = PTR_ERR(path_tmp);\n        goto error_out;\n    }\n    memmove(t->file_init_path, path_tmp, strlen(path_tmp) + 1);\n\n    target_file_buffered = shournal_kio_from_file(target_file, TARGET_FILE_BUFSIZE);\n    if(IS_ERR(target_file_buffered)){\n        error = PTR_ERR(target_file_buffered);\n        goto error_out;\n    }\n    if((error = event_consumer_init(&t->event_consumer)) )\n         goto error_out;\n\n    t->exit_code = SHOURNALK_INVALID_EXIT_CODE;\n    t->pid_ns = pid_ns;\n    t->user_ns = current_user_ns();\n    t->memcg = memcg;\n    t->mm = mm;\n    t->file = target_file_buffered;\n    t->pipe_w = pipe_w;\n    t->caller_tsk = current;\n    atomic_set(&t->_written_to_user_pipe, 0);\n    t->ERROR = false;\n\n    kuref_set(&t->_f_count, 1);\n    t->cred = current_cred();\n    t->lost_event_count = 0;\n    t->stored_files_count = 0;\n\n    t->partial_hash.chunksize = mark_struct->settings.hash_chunksize;\n    
t->partial_hash.max_count_of_reads = mark_struct->settings.hash_max_count_reads;\n\n    mutex_init(&t->lock);\n    t->settings = mark_struct->settings;\n    kpathtree_init(&t->w_includes);\n    kpathtree_init(&t->w_excludes);\n    kpathtree_init(&t->r_includes);\n    kpathtree_init(&t->r_excludes);\n    kpathtree_init(&t->script_includes);\n    kpathtree_init(&t->script_excludes);\n\n    file_extensions_init(&t->script_ext);\n\n\n    // do not fail from here on, otherwise references would need\n    // to be dropped again\n    get_pid_ns(pid_ns);\n    get_cred(current_cred());\n    get_task_struct(current);\n    get_user_ns(t->user_ns);\n\n\n    return t;\n\n\nerror_out:\n    if(! IS_ERR_OR_NULL(target_file_buffered)) kfree(target_file_buffered);\n    if(! IS_ERR_OR_NULL(t)) {\n        kfree(t->partial_hash.xxh_state);\n        kvfree(t->partial_hash.buf);\n        kvfree(t);\n    }\n    if(memcg) mem_cgroup_put(memcg);\n    if(mm) { mmdrop(mm); mmput(mm); }\n\n    return ERR_PTR(error);\n}\n\nstatic void __event_target_free(struct event_target* t){\n    event_consumer_cleanup(&t->event_consumer);\n    file_extensions_cleanup(&t->script_ext);\n    kpathtree_cleanup(&t->w_includes);\n    kpathtree_cleanup(&t->w_excludes);\n    kpathtree_cleanup(&t->r_includes);\n    kpathtree_cleanup(&t->r_excludes);\n    kpathtree_cleanup(&t->script_includes);\n    kpathtree_cleanup(&t->script_excludes);\n\n    put_user_ns(t->user_ns);\n    put_pid_ns(t->pid_ns);\n    mem_cgroup_put(t->memcg);\n    if(t->mm){ mmdrop(t->mm); mmput(t->mm); }\n    put_task_struct(t->caller_tsk);\n\n    put_cred(t->cred);\n    shournal_kio_close(t->file);\n    fput(t->pipe_w);\n    kvfree(t->partial_hash.buf);\n    kfree(t->partial_hash.xxh_state);\n    kvfree(t);\n}\n\nstatic void __envent_target_destroy_work(struct work_struct *work){\n    struct event_target* t = container_of(to_rcu_work(work),\n                    struct event_target, destroy_rwork);\n    
__event_target_free(t);\n}\n\n\n\n////////////////////////////// public ////////////////////////////////////\n\n\n// TODO: limit listeners per user to 128, like fanotify does.\n/// event_target's are found by the struct file\n/// where metadata about the file events are written to.\n/// That way, multiple pid's can be marked for observation.\n/// If no entry for the given target_fd exists, create a new one\n/// (and store it in the hash-table). Once all processes finished\n/// (or were unmarked again), notify userspace by writing into\n/// the passed pipe.\nstruct event_target*\nevent_target_create(const struct shournalk_mark_struct * mark_struct)\n{\n    struct event_target* event_target = NULL;\n    struct file* target_file = NULL;\n    struct file* pipe_w = NULL;\n    long error = -ENOSYS;\n\n    target_file = __get_check_target_file(mark_struct->target_fd);\n    if(IS_ERR(target_file)){\n        return (void*)target_file;\n    }\n\n    pipe_w = __get_check_pipe(mark_struct->pipe_fd);\n    if(IS_ERR(pipe_w)){\n        error = PTR_ERR(pipe_w);\n        goto err_put_unlock;\n    }\n    event_target = __event_target_create(target_file, pipe_w, mark_struct);\n    if(IS_ERR(event_target) ){\n        error = PTR_ERR(event_target);\n        goto err_put_unlock;\n    }\n\n    // maybe_todo: add caller-pid to threadname?\n    if((error = event_consumer_thread_create(event_target, \"shournalk_consumer\"))){\n        goto err_put_unlock;\n    }\n\n    return event_target;\n\nerr_put_unlock:\n    if(! IS_ERR_OR_NULL(event_target)){\n        // ownership of pipe and target_file already transferred\n        __event_target_free(event_target);\n    } else {\n        if(! IS_ERR_OR_NULL(pipe_w)) fput(pipe_w);\n        if(! IS_ERR_OR_NULL(target_file)) fput(target_file);\n    }\n    return ERR_PTR(error);\n}\n\n\n// no events are registered before target is commited\nlong event_target_commit(struct event_target* t){\n    WARN(! 
mutex_is_locked(&t->lock), \"commit called without target lock\\n\");\n    barrier();\n\n    if( unlikely(event_target_is_commited(t))){\n        pr_debug(\"event target already commited\");\n        return -EBUSY;\n    }\n    if(t->w_includes.n_paths){\n        WRITE_ONCE(t->w_enable, true);\n    }\n    if(t->r_includes.n_paths || t->script_includes.n_paths){\n        WRITE_ONCE(t->r_enable, true);\n    }\n    if( unlikely( ! event_target_is_commited(t))){\n        // nothing marked - user did not specify any include-paths\n        pr_debug(\"cannot commit - no include-paths registered\");\n        return -ENOTDIR;\n    }\n    return 0;\n}\n\nbool event_target_is_commited(const struct event_target* t){\n    return READ_ONCE(t->w_enable) || READ_ONCE(t->r_enable);\n}\n\n\n/// Final put\nvoid __event_target_put(struct event_target* event_target){\n    long user_ret;\n    int pending_bytes;\n    struct event_consumer* consumer = &event_target->event_consumer;\n    struct circ_buf* circ_buf = &consumer->circ_buf;\n    bool we_are_consume_thread;\n\n    might_sleep();\n\n#ifdef DEBUG\n    if(event_target->__dbg_flags){\n        pr_info(\"event_target has dbg-flags set!\\n\");\n        dump_stack();\n    }\n#endif\n    we_are_consume_thread = current == consumer->consume_task;\n    if(we_are_consume_thread){\n        event_consumer_thread_cleanup(event_target);\n    } else {\n        // stopping the consumer thread also flushes the event buffer\n        // one last time\n        event_consumer_thread_stop(consumer);\n    }\n\n    pr_devel(\"Event processing done. 
Caller pid: %d - init target file path %s\\n\",\n             event_target->caller_tsk->pid, event_target->file_init_path);\n    pr_devel(\"consumed count: %lld, examined files: w: %lld, r: %lld\\n\",\n            event_target->consumed_event_count,\n            event_target->w_examined_count,\n            event_target->r_examined_count);\n\n    user_ret = shournal_kio_flush(event_target->file);\n    if(user_ret >= 0){\n        user_ret = 0;\n    } else {\n        pr_debug(\"final target-file flush failed with %ld\\n\", user_ret);\n        user_ret = -user_ret;\n    }\n    pending_bytes = CIRC_CNT(READ_ONCE(circ_buf->head),\n                             READ_ONCE(circ_buf->tail),\n                             event_target->event_consumer.circ_buf_size);\n    kutil_WARN_ONCE_IFN_DBG(pending_bytes != 0,\n                            \"pending bytes not 0 but %d\", pending_bytes);\n\n    event_target_write_result_to_user_ONCE(event_target, user_ret);\n\n    // pr_info(\"dircache-hits: %lld, pathwrite_hits: %lld\\n\", event_target->_dircache_hits,\n    //             event_target->_pathwrite_hits);\n\n    if(current_work() == NULL){\n        INIT_RCU_WORK(&event_target->destroy_rwork, __envent_target_destroy_work);\n        // system_long_wq is flushed in event_handler_destructor, so do not change!\n        queue_rcu_work(system_long_wq, &event_target->destroy_rwork);\n    } else {\n        __event_target_free(event_target);\n    }\n\n    if(we_are_consume_thread){\n        // we just released the final reference - that's it.\n        kutil_kthread_exit(NULL, 0);\n    }\n}\n\n\n\n\nvoid event_target_write_result_to_user_ONCE(struct event_target* event_target, long error_nb){\n    loff_t pos;\n    ssize_t write_ret;\n    struct shournalk_run_result result = {\n        .error_nb = (int)error_nb,\n        .w_event_count = event_target->w_event_count,\n        .r_event_count = event_target->r_event_count,\n        .lost_event_count = event_target->lost_event_count,\n        
.stored_event_count = event_target->stored_files_count,\n        .selected_exitcode = event_target->exit_code\n    };\n    if(atomic_xchg(&event_target->_written_to_user_pipe, 1)){\n        pr_devel(\"already written result (probably a previous error occurred\");\n        return;\n    }\n    pos = 0;\n    write_ret = kutil_kernel_write(\n                event_target->pipe_w, &result, sizeof(result), &pos);\n    if(write_ret != sizeof(result)){\n        pr_debug(\"Failed to write to user pipe - returned: %ld\", write_ret);\n    }\n}\n\n"
  },
  {
    "path": "kernel/event_target.h",
    "content": "\n#pragma once\n\n#include \"shournalk_global.h\"\n#include \"xxhash_common.h\"\n\n#include <linux/atomic.h>\n#include <linux/limits.h>\n\n#include \"kpathtree.h\"\n#include \"kfileextensions.h\"\n#include \"shournalk_user.h\"\n#include \"event_consumer.h\"\n#include \"kutil.h\"\n\n// somewhat arbitrary, maybe raise?\n#define PART_HASH_MAX_CHUNKSIZE 4096*16\n\n#define TARGET_FILE_BUFSIZE  (1 << 15)\n\nstruct cred;\nstruct file;\nstruct kbuffered_file;\nstruct pid_namespace;\nstruct user_namespace;\nstruct shournalk_mark_struct;\nstruct dentry;\n\n\nstruct event_target {\n    kuref_t _f_count; /* refcount - do not edit */\n\n    bool w_enable; /* record write events */\n    bool r_enable; /* record read events */\n    bool ERROR; /* lazy-release references in case of an error */\n    uint64_t lost_event_count;\n    struct task_struct* exit_tsk; /* task for which to collect the exit code */\n    int exit_code; /* see exit_tsk */\n\n    struct event_consumer event_consumer;\n    uint64_t consumed_event_count;\n    struct file* pipe_w; /* write end of pipe. 
Id and bridge to user space group */\n    struct kbuffered_file* file; /* write events in here from kernel space */\n    const struct cred *cred; /* of the owner of the event target */\n    uint64_t w_event_count; /* # logged write events */\n    uint64_t w_dropped_count;  /* # dropped exceeding max_event_count */\n    uint64_t w_deleted_count;  /* # file was deleted */\n    uint64_t w_examined_count;  /* events taken a closer look at */\n    uint64_t r_event_count; /* # logged read events */\n    uint64_t r_dropped_count;  /* # dropped exceeding max_event_count */\n    uint64_t r_deleted_count;  /* # file was deleted */\n    uint64_t r_examined_count;  /* events taken a closer look at */\n\n    unsigned stored_files_count;\n    struct pid_namespace *pid_ns; /* we only follow forks in same pid ns */\n    struct user_namespace* user_ns; /* of caller */\n    struct mem_cgroup* memcg;     /* of caller */\n    struct mm_struct* mm;         /* of caller */\n\n    struct task_struct* caller_tsk; /* the caller interested in events. 
*/\n\n    struct shounalk_settings settings;\n    struct partial_xxhash partial_hash;\n\n    struct mutex lock; /* protects adding paths before committed */\n\n    atomic_t _written_to_user_pipe; /* we write to user pipe only once */\n    uint64_t _dircache_hits;\n    uint64_t _pathwrite_hits;\n\n    struct file_extensions script_ext;\n    struct kpathtree w_includes;\n    struct kpathtree w_excludes;\n    struct kpathtree r_includes;\n    struct kpathtree r_excludes;\n    struct kpathtree script_includes;\n    struct kpathtree script_excludes;\n\n    char file_init_path[PATH_MAX];\n\n    struct rcu_work destroy_rwork;\n    int __dbg_flags;\n};\n\nstruct event_target* event_target_create(const struct shournalk_mark_struct*);\nlong event_target_commit(struct event_target*);\nbool event_target_is_commited(const struct event_target*);\n\n\nstatic inline __attribute__((__warn_unused_result__))\nstruct event_target*\nevent_target_get(struct event_target* event){\n    if(likely(kuref_inc_not_zero(&event->_f_count))){\n        return event;\n    }\n    return NULL;\n}\n\nvoid __event_target_put(struct event_target* event_target);\n\nstatic inline void\nevent_target_put(struct event_target* event_target){\n#ifdef DEBUG\n    might_sleep();\n#endif\n\n    if(unlikely( kuref_dec_and_test(&event_target->_f_count) )){\n        __event_target_put(event_target);\n    }\n}\n\n\nvoid event_target_write_result_to_user_ONCE(struct event_target*, long error_nb);\n\n"
  },
  {
    "path": "kernel/hash_table_str.c",
    "content": "\n#include \"hash_table_str.h\"\n\n#include <linux/slab.h>\n#include <linux/mm.h>\n\n\nstatic const int HASH_GFP_FLAGS = SHOURNALK_GFP | __GFP_RETRY_MAYFAIL;\n\n/// creates a copy of the passed string\nstruct hash_entry_str*\nhash_entry_str_create(const char* str, size_t str_len){\n    struct hash_entry_str* str_entry = kmalloc(sizeof (struct hash_entry_str),\n                                          HASH_GFP_FLAGS);\n    if(!str_entry){\n        return ERR_PTR(-ENOMEM);\n    }\n    str_entry->str = kmalloc(str_len, HASH_GFP_FLAGS);\n    if(!str_entry->str){\n        kfree(str_entry);\n        return ERR_PTR(-ENOMEM);\n    }\n\n    memcpy(str_entry->str, str, str_len);\n    str_entry->str_len = str_len;\n    return str_entry;\n}\n\n\nvoid hash_entry_str_free(struct hash_entry_str* entry){\n    kfree(entry->str);\n    kfree(entry);\n}\n\n"
  },
  {
    "path": "kernel/hash_table_str.h",
    "content": "\n#pragma once\n\n#include \"shournalk_global.h\"\n#include \"kutil.h\"\n\n#include <linux/hashtable.h>\n\n#include \"xxhash_shournalk.h\"\n\n\nstatic inline u32\n__hash_table_str_do_hash(const char* path, size_t path_len) {\n    return xxh32(path, path_len, 0);\n}\n\nstruct hash_entry_str {\n     char* str;\n     size_t str_len;\n     struct hlist_node node ;\n};\n\n\nstruct hash_entry_str*\nhash_entry_str_create(const char* str, size_t str_len);\nvoid hash_entry_str_free(struct hash_entry_str* entry);\n\n\n/// @param _obj_ must be passed as null, result is stored there if any\n#define hash_table_str_find(_name_, _obj_, _str_, _str_len_)            \\\n do {                                                                   \\\n    struct hash_entry_str* ____tmp;                                     \\\n    u32 ____str_hash;                                                   \\\n    ____str_hash = __hash_table_str_do_hash(_str_, _str_len_);          \\\n    kutil_WARN_DBG((_obj_) != NULL, \"(_obj_) != NULL\");                 \\\n    hash_for_each_possible(_name_, ____tmp, node, ____str_hash)         \\\n        if(____tmp->str_len == (_str_len_) &&                           \\\n                memcmp(____tmp->str, (_str_), (_str_len_)) == 0){       \\\n            (_obj_) = ____tmp;                                          \\\n            break;                                                      \\\n         }                                                              \\\n    } while (0)\n\n\n\n#define hash_table_str_add(_name_, _obj_)   \\\n    hash_add(_name_, &(_obj_)->node,        \\\n             __hash_table_str_do_hash((_obj_)->str, (_obj_)->str_len))\n\n\n#define hash_table_str_cleanup(_name_)                                      \\\n    do {                                                                    \\\n    u32 ____bucket;                                                         \\\n    struct hash_entry_str* ____el;               
                           \\\n    struct hlist_node *____temp_node;                                       \\\n    hash_for_each_safe((_name_), ____bucket, ____temp_node, ____el, node) { \\\n        hash_del(&____el->node);                                            \\\n        hash_entry_str_free(____el);                                        \\\n    }                                                                       \\\n    } while (0)\n\n"
  },
  {
    "path": "kernel/kfileextensions.c",
    "content": "\n\n#include \"kfileextensions.h\"\n\n#include \"hash_table_str.h\"\n\n\nvoid file_extensions_init(struct file_extensions* extensions){\n    hash_init(extensions->table);\n    extensions->n_ext = 0;\n}\n\n\nvoid file_extensions_cleanup(struct file_extensions* extensions){\n    hash_table_str_cleanup(extensions->table);\n}\n\n\nlong file_extensions_add(struct file_extensions* extensions,\n                         const char* ext, size_t ext_len){\n    struct hash_entry_str* entry;\n\n    // we store file extensions in the table without\n    // leading dot.\n    kutil_WARN_DBG(ext_len == 0, \"ext_len == 0\");\n    kutil_WARN_DBG(ext[0] == '.', \"ext[0] == '.'\");\n    kutil_WARN_DBG(strnstr(ext, \"/\", ext_len) != NULL, \"strnstr(ext, /, ext_len)\");\n\n    entry =  hash_entry_str_create(ext, ext_len);\n    if(IS_ERR(entry)){\n        return PTR_ERR(entry);\n    }\n    hash_table_str_add(extensions->table, entry);\n    extensions->n_ext++;\n    return 0;\n}\n\n\nlong file_extensions_add_multiple(struct file_extensions* extensions,\n                        const char* ext_strs, size_t str_len){\n    long ret;\n    const char* end = ext_strs + str_len;\n    const char* s;\n\n    for(s = ext_strs; s < end; s++) {\n        if(*s == '/'){\n            size_t s_len = s - ext_strs;\n            if(unlikely(s_len < 1)){\n                pr_debug(\"empty extension passed\\n\");\n                return -EINVAL;\n            }\n            if(unlikely((ret=file_extensions_add(extensions, ext_strs, s_len)))){\n                return ret;\n            }\n            ext_strs = s + 1;\n        }\n    }\n    if(unlikely(s != ext_strs)){\n        pr_debug(\"extensions-string did not have trailing /\\n\");\n        return -EINVAL;\n    }\n    return 0;\n}\n\n\n\n/// Check if the file-extension (if any) of the given canonical path\n/// is contained within the struct file_extensions\nbool file_extensions_contain(struct file_extensions* extensions,\n                      
       const char* path, size_t path_len){\n    const char* str;\n    const char* const end = path + path_len - 1;\n\n    // empty paths are not allowed here\n    kutil_WARN_ON_DBG(path_len == 0);\n\n    if(*end == '.') return false;\n\n    // loop backwards through the string until the first slash (no extension)\n    // or dot is found\n    for(str = end - 1; str >= path; str-- ){\n        if(*str == '/'){\n            // nothing found\n            break;\n        }\n        if(*str == '.'){\n            struct hash_entry_str* entry = NULL;\n            const char* ext_start = str + 1;\n            size_t ext_len = end - ext_start + 1;\n            hash_table_str_find(extensions->table, entry, ext_start, ext_len);\n            return entry != NULL;\n        }\n    }\n    return false;\n}\n"
  },
  {
    "path": "kernel/kfileextensions.h",
    "content": "\n#pragma once\n\n#include \"shournalk_global.h\"\n\n#include <linux/hashtable.h>\n\n#define KFILEEXT_BITS 6\n\nstruct file_extensions {\n    DECLARE_HASHTABLE(table, KFILEEXT_BITS);\n    size_t n_ext; /* number of extensions within the table */\n};\n\nvoid file_extensions_init(struct file_extensions*);\nvoid file_extensions_cleanup(struct file_extensions*);\n\nlong file_extensions_add(struct file_extensions* extensions,\n                         const char* ext, size_t ext_len);\nlong file_extensions_add_multiple(struct file_extensions* extensions,\n                        const char* ext_strs, size_t str_len);\n\nbool file_extensions_contain(struct file_extensions* extensions,\n                             const char* path, size_t path_len);\n\n"
  },
  {
    "path": "kernel/kpathtree.c",
    "content": "\n#include \"kpathtree.h\"\n\n#include <linux/hashtable.h>\n#include <linux/slab.h>\n#include <linux/mm.h>\n#include <linux/sort.h>\n\n#include \"kutil.h\"\n#include \"xxhash_shournalk.h\"\n#include \"hash_table_str.h\"\n\n\nstatic int __compare_ints(const void *lhs, const void *rhs) {\n    int lhs_integer = *(const int *)(lhs);\n    int rhs_integer = *(const int *)(rhs);\n\n    if (lhs_integer < rhs_integer) return -1;\n    if (lhs_integer > rhs_integer) return 1;\n    return 0;\n}\n\nstatic bool __path_len_exists(struct kpathtree* pathtree, int path_len){\n    int i;\n    // maybe_todo: the path sizes are sorted,\n    // so this coul be improved. However, KPATHTREE_MAX_SIZE\n    // is small and adding a path not performance-critical..\n    for(i=0; i < pathtree->__n_path_sizes; i++){\n        if(pathtree->__path_sizes[i] == path_len){\n            return true;\n        }\n    }\n    return false;\n}\n\n\n\n\n////////////////////////////////////////////////////////////////////\n\nstruct kpathtree* kpathtree_create(void){\n    struct kpathtree* pathtree = kzalloc(sizeof (struct kpathtree), SHOURNALK_GFP);\n    if(!pathtree){\n        return ERR_PTR(-ENOMEM);\n    }\n    kpathtree_init(pathtree);\n    return pathtree;\n}\n\nvoid kpathtree_free(struct kpathtree* pathtree){\n    kpathtree_cleanup(pathtree);\n    kfree(pathtree);\n}\n\n\n/// pathtree must have been nulled before\nvoid kpathtree_init(struct kpathtree* pathtree){\n    WARN(pathtree->__is_init, \"pathtree already initialized!\");\n\n    pathtree->n_paths = 0;\n    pathtree->__n_path_sizes = 0;\n    hash_init(pathtree->path_table);\n    mutex_init(&pathtree->lock);\n    pathtree->__is_init = true;\n}\n\nvoid kpathtree_cleanup(struct kpathtree* pathtree){\n    if(! 
pathtree->__is_init){\n        WARN(1, \"pathtree not initialized!\");\n        return;\n    }\n    hash_table_str_cleanup(pathtree->path_table);\n\n    pathtree->__is_init = false;\n}\n\n\nlong kpathtree_add(struct kpathtree* pathtree, const char* path, int path_len){\n    struct hash_entry_str* entry;\n\n    if(pathtree->n_paths >= KPATHTREE_MAX_SIZE){\n        return -ENOSPC;\n    }\n    entry =  hash_entry_str_create(path, path_len);\n    if(IS_ERR(entry)){\n        return PTR_ERR(entry);\n    }\n    hash_table_str_add(pathtree->path_table, entry);\n    pathtree->n_paths++;\n\n    if(! __path_len_exists(pathtree, path_len)){\n        pathtree->__path_sizes[pathtree->__n_path_sizes] = path_len;\n        pathtree->__n_path_sizes++;\n        sort(pathtree->__path_sizes, pathtree->__n_path_sizes, sizeof(int), &__compare_ints, NULL);\n    }\n    return 0;\n}\n\n\nbool kpathtree_is_subpath(struct kpathtree* pathtree, const char* path,\n                          int path_len, bool allow_equals){\n    int i;\n    struct hash_entry_str* entry = NULL;\n\n    if(pathtree->n_paths == 0){\n        return false;\n    }\n    if(pathtree->__path_sizes[0] == 1){\n        // We contain the root node (if input is valid - else we don't care).\n        // As this function is only intended for file-paths, just:\n        return true;\n    }\n    for(i=0; i < pathtree->__n_path_sizes; i++){\n        int s = pathtree->__path_sizes[i];\n\n        if(s < path_len){\n            // If we didn't have a / at the next position, we would cut the\n            // path at a wrong position -> continue\n            if(path[s] != '/'){\n                continue;\n            }\n            // A candiate path with the same size exists.\n            // maybe_todo: incremental hash (but xxhash_update also\n            // has overhead..)\n            hash_table_str_find(pathtree->path_table, entry, path, (size_t)s);\n            if(entry != NULL){\n                return true;\n            }\n          
  // keep going\n        } else if(s > path_len) {\n            // __path_sizes is ordered ascending -> the\n            // next paths will be even longer:\n            return false;\n        } else {\n            // s == path.size\n            // The next m_orderedPathlength will be greater, so we can only\n            // be a 'sub'-path, if allow_equals is true.\n            if(allow_equals){\n                hash_table_str_find(pathtree->path_table, entry, path, (size_t)s);\n                return entry != NULL;\n            }\n            return false;\n        }\n    }\n    return false;\n}\n\n"
  },
  {
    "path": "kernel/kpathtree.h",
    "content": "\n#pragma once\n\n#include \"shournalk_global.h\"\n\n#include <linux/hashtable.h>\n#include <linux/mutex.h>\n\n\n#define KPATHTREE_BITS 6\n#define KPATHTREE_MAX_SIZE (1 << KPATHTREE_BITS)\n\n#define __KPATHTREE_INITIALIZER(treename) \\\n    { .n_paths = 0 \\\n    , .__n_path_sizes = 0 \\\n    , .__is_init = true \\\n    , .lock = __MUTEX_INITIALIZER(treename.lock) \\\n    , .path_table = { [0 ... ((1 << (KPATHTREE_BITS)) - 1)] = HLIST_HEAD_INIT } }\n\nstruct kpathtree {\n    DECLARE_HASHTABLE(path_table, KPATHTREE_BITS);\n    struct mutex lock;\n    int n_paths; /* number of paths alreay added */\n    int __path_sizes[KPATHTREE_MAX_SIZE];\n    int __n_path_sizes;\n    bool __is_init;\n};\n\n\nstruct kpathtree* kpathtree_create(void);\nvoid kpathtree_free(struct kpathtree* pathtree);\n\nvoid kpathtree_init(struct kpathtree* pathtree);\nvoid kpathtree_cleanup(struct kpathtree* pathtree);\n\nlong kpathtree_add(struct kpathtree* pathtree, const char* path, int path_len);\nbool kpathtree_is_subpath(struct kpathtree* pathtree, const char* path,\n                          int path_len, bool allow_equals);\n"
  },
  {
    "path": "kernel/kutil.c",
    "content": "\n#include <linux/dcache.h>\n#include <linux/file.h>\n#include <linux/fdtable.h>\n#include <linux/fs.h>\n#include <linux/slab.h>\n#include <linux/cred.h>\n#include <linux/uio.h>\n#include <linux/pagemap.h>\n#include <linux/pipe_fs_i.h>\n#include <linux/rcupdate.h>\n\n#include \"kutil.h\"\n\n#ifdef KVMALLOC_BACKPORT\n#include <linux/vmalloc.h>\n\nvoid *_kvmalloc_node_backport(size_t size, gfp_t flags, int node)\n{\n    gfp_t kmalloc_flags = flags;\n    void *ret;\n\n    /*\n     * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)\n     * so the given set of flags has to be compatible.\n     */\n    WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);\n\n    /*\n     * Make sure that larger requests are not too disruptive - no OOM\n     * killer and no allocation failure warnings as we have a fallback\n     */\n    if (size > PAGE_SIZE)\n        kmalloc_flags |= __GFP_NORETRY | __GFP_NOWARN;\n\n    ret = kmalloc_node(size, kmalloc_flags, node);\n\n    /*\n     * It doesn't really make sense to fallback to vmalloc for sub page\n     * requests\n     */\n    if (ret || size <= PAGE_SIZE)\n        return ret;\n\n    return __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);\n}\n\n#endif\n\n\n/// Resolve the pathname of a regular, *not* deleted file.\n/// @param file The file to resolve the pathname of\n/// @param resolved_pathname a buffer of at least PATH_MAX size.\nchar* resolve_reg_filepath(struct files_struct *files,\n                           struct file *file,\n                           char * buf){\n    // see also proc_fd_link and\n    // https://stackoverflow.com/a/8250940/7015849\n    struct path *path;\n    char* pathname;\n\n    spin_lock(&files->file_lock);\n    if (!file || !S_ISREG(file_inode(file)->i_mode) ||\n            file_inode(file)->i_nlink == 0) {\n        spin_unlock(&files->file_lock);\n        return NULL;\n    }\n\n    path = &file->f_path;\n    path_get(path);\n    spin_unlock(&files->file_lock);\n\n    
pathname = d_path(path, buf, PATH_MAX);\n    path_put(path);\n\n    if (IS_ERR(pathname)) {\n        return NULL;\n    }\n    return pathname;\n}\n\n\nssize_t kutil_kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos){\n    ssize_t ret;\n#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)\n    ret = kernel_write(file, buf, count, pos);\n#else\n    mm_segment_t fs_save;\n\n    fs_save = get_fs();\n    set_fs(get_ds());\n    ret = vfs_write(file, buf, count, pos);\n    set_fs(fs_save);\n#endif\n    return ret;\n}\n\n\nssize_t kutil_kernel_write_locked(struct file * file, const void *buf, size_t count)\n{\n    ssize_t ret;\n\n    mutex_lock(&file->f_pos_lock);\n\n    ret = kutil_kernel_write(file, buf, count, &file->f_pos);\n\n    mutex_unlock(&file->f_pos_lock);\n    return ret;\n}\n\n\nssize_t kutil_kernel_read_locked(struct file *file, void *buf, size_t count){\n    ssize_t ret;\n\n    mutex_lock(&file->f_pos_lock);\n\n    ret = kutil_kernel_read(file, buf, count, &file->f_pos);\n\n    mutex_unlock(&file->f_pos_lock);\n    return ret;\n}\n\n/// Try to read from file without disturbing the page cache.\n/// Eventually we should use mmap and something like\n/// MADV_FREE instead (zap_page_range)?\n/// Or re-suggest a similar approach Jens Axboe proposed in Dec 2019, e.g.\n///     [PATCH 1/5] fs: add read support for RWF_UNCACHED\n///     https://lwn.net/Articles/807519/\n/// Imaginable is e.g. 
a preadv2-flag RWF_CACHEFRIENDLY which does not\n/// call mark_page_accessed() when a page is read.\n///\n/// Anyhow, here we trick mm/filemap.c:filemap_read() (v5.12-rc5-3-g1e43c377a79f)\n/// into *not* calling mark_page_accessed by assigning ra->prev_pos to the current\n/// pos(ition):\n/// if (iocb->ki_pos >> PAGE_SHIFT !=\n///     ra->prev_pos >> PAGE_SHIFT)\n///     mark_page_accessed(pvec.pages[0]);\n/// Note that this only works, if we read one page, that's why we\n/// call kernel_read multiple times, if necessary.\n///\n/// Another approach might be to clear the page-reference-bit afterwards,\n/// but I'm not completely sure that this is legal..:\n/// struct address_space *mapping = file->f_mapping;\n/// struct page* page = find_get_entry(mapping, *pos / PAGE_SIZE);\n/// kernel_read();\n/// ClearPageReferenced(page);\nssize_t\nkutil_kernel_read_cachefriendly(struct file *file, void *buf, size_t count, loff_t *pos){\n    ssize_t read_size_total = 0;\n    struct file_ra_state *ra = &file->f_ra;\n\n    while(1){\n        ssize_t ret;\n        ssize_t current_count;\n        size_t count_to_page_end = round_up(*pos, PAGE_SIZE) - *pos;\n        if(count_to_page_end  == 0){\n            // At page-start.\n            count_to_page_end = PAGE_SIZE;\n        }\n\n        current_count = min(count - read_size_total, count_to_page_end);\n        ra->prev_pos = *pos;\n        ret = kutil_kernel_read(file, buf, current_count, pos);\n        if(unlikely(ret < 0)){\n            return ret;\n        }\n        read_size_total += ret;\n        if(ret < current_count || read_size_total >= (ssize_t)count){\n            // EOF or everything read\n            return read_size_total;\n        }\n        buf += ret;\n    }\n}\n\n\nvoid kutil_take_name_snapshot(struct kutil_name_snapshot* snapshot,\n                              struct dentry *dentry){\n\n    // see lib/vsprintf.c:dentry_name and fs/dcache.c:dentry_cmp\n    // -> apparently no locking is required for\n    // 
reading d_name.name, because:\n    // \"dentry name is guaranteed to be properly terminated with a NUL byte\".\n    // However, linux/dache.h:take_dentry_name_snapshot\n    // does locking (probably for consistent hash/len?).\n    // (unsafe: snapshot->name = dentry->d_name;)\n\n    const unsigned char *name;\n\n    rcu_read_lock();\n    name = READ_ONCE(dentry->d_name.name);\n    strncpy((char*)snapshot->inline_name,\n            (const char*)name, sizeof (snapshot->inline_name) - 1);\n    rcu_read_unlock();\n\n    // interface compatibility with struct name_snapshot from dcache.h\n    snapshot->name.name = snapshot->inline_name;\n    snapshot->name.len = (u32)strnlen((const char*)snapshot->name.name,\n                                 sizeof (snapshot->inline_name));\n    if(unlikely(snapshot->name.len == sizeof (snapshot->inline_name))){\n        snapshot->inline_name[sizeof (snapshot->inline_name) - 1] = '\\0';\n        pr_warn_once(\"Bug! Unterminated filename found: %s\", snapshot->name.name);\n    }\n}\n\n// interface compatibility with struct name_snapshot\nvoid kutil_release_name_snapshot(struct kutil_name_snapshot *name __attribute__ ((unused)))\n{}\n\n\n#ifdef kutil_BACKPORT_USE_MM\n\n#include <linux/mmu_context.h>\n\n// declare as weak to satisfy compiler. 
However,\n// one of use_mm or kthread_use_mm _must_ be defined (by kernel).\nvoid use_mm(struct mm_struct *mm) __attribute__((weak));\nvoid unuse_mm(struct mm_struct *mm) __attribute__((weak));\nvoid kthread_use_mm(struct mm_struct*) __attribute__((weak));\nvoid kthread_unuse_mm(struct mm_struct*) __attribute__((weak));\n\n\nvoid kutil_use_mm(struct mm_struct *mm) {\n    if(use_mm)\n        use_mm(mm);\n    else if(kthread_use_mm)\n        kthread_use_mm(mm);\n    else\n        pr_warn_once(\"kthread_use_mm and use_mm not defined - please report\");\n}\n\nvoid kutil_unuse_mm(struct mm_struct *mm) {\n    if(unuse_mm)\n        unuse_mm(mm);\n    else if(kthread_unuse_mm)\n        kthread_unuse_mm(mm);\n    else\n        pr_warn_once(\"kthread_unuse_mm and unuse_mm not defined - please report\");\n}\n\n#endif // kutil_BACKPORT_USE_MM\n\n\n// see commit cead18552660702a4a46f58e65188fe5f36e9dfe\n// declare as weak to satisfy compiler. However,\n// one of complete_and_exit and kthread_complete_and_exit\n// _must_ be defined (by kernel).\nvoid complete_and_exit(struct completion *comp, long code)__attribute__((weak));\nvoid kthread_complete_and_exit(struct completion *comp, long code)__attribute__((weak));\n\nvoid kutil_kthread_exit(struct completion *comp, long code){\n    if(complete_and_exit)\n        complete_and_exit(comp, code);\n    else if(kthread_complete_and_exit)\n        kthread_complete_and_exit(comp, code);\n    pr_err(\"Failed to stop kernel thread. 
Please unload this module \"\n           \"immediatly and report this fatal bug.\");\n}\n\n\n#ifdef RCU_WORK_BACKPORT\n\nstatic void rcu_work_rcufn(struct rcu_head *rcu)\n{\n    struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);\n    queue_work(rwork->wq, &rwork->work);\n}\n\nbool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)\n{\n    rwork->wq = wq;\n    call_rcu(&rwork->rcu, rcu_work_rcufn);\n    return true;\n}\n\n#endif // RCU_WORK_BACKPORT\n\n#ifdef GET_MEMCG_FROM_MM_BACKPORT\n\nstruct mem_cgroup *_get_mem_cgroup_from_mm_backport(struct mm_struct *mm)\n{\n    struct mem_cgroup *memcg = NULL;\n\n    if (unlikely(!mm))\n        return NULL;\n\n    rcu_read_lock();\n    do {\n        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));\n        if (unlikely(!memcg))\n            break;\n\n    } while (!css_tryget_online(&memcg->css));\n    rcu_read_unlock();\n    return memcg;\n}\n\n#endif // GET_MEMCG_FROM_MM_BACKPORT\n\n\n\n"
  },
  {
    "path": "kernel/kutil.h",
    "content": "#pragma once\n\n#include \"shournalk_global.h\"\n\n#include <linux/cgroup.h>\n#include <linux/dcache.h>\n#include <linux/bug.h>\n#include <linux/fs.h>\n#include <linux/memcontrol.h>\n#include <linux/types.h>\n#include <linux/version.h>\n#include <linux/ptrace.h>\n#include <linux/sched.h>\n#include <linux/limits.h>\n#include <asm/syscall.h>\n#include <asm/uaccess.h>\n\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))\n#define mmgrab _mmgrab_backport\nstatic inline void _mmgrab_backport(struct mm_struct *mm) {\n    atomic_inc(&mm->mm_count);\n}\n#else\n#include <linux/sched/mm.h>\n#endif\n\nstruct pipe_inode_info;\n\n#ifdef DEBUG\n#define kutil_WARN_DBG WARN\n#define kutil_WARN_ON_DBG WARN_ON\n// if DEBUG always warn, else only once\n#define kutil_WARN_ONCE_IFN_DBG WARN\n#else\n#define kutil_WARN_DBG(condition, format...)\n#define kutil_WARN_ON_DBG(condition)\n#define kutil_WARN_ONCE_IFN_DBG WARN_ONCE\n#endif\n\n// see commit dcda9b04713c3f6ff0875652924844fae28286ea\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) && \\\n     !defined __GFP_RETRY_MAYFAIL\n#define __GFP_RETRY_MAYFAIL __GFP_REPEAT\n\n#endif\n\n// see commit a7c3e901a46ff54c016d040847eda598a9e3e653\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))\n#define KVMALLOC_BACKPORT\n\n#define kvmalloc_node _kvmalloc_node_backport\nvoid* _kvmalloc_node_backport(size_t size, gfp_t flags, int node);\n\n#define kvmalloc _kvmalloc_backport\nstatic inline void* _kvmalloc_backport(size_t size, gfp_t flags)\n{\n    return kvmalloc_node(size, flags, NUMA_NO_NODE);\n}\n\n#define kvzalloc _kvzalloc_backport\nstatic inline void* _kvzalloc_backport(size_t size, gfp_t flags)\n{\n    return kvmalloc(size, flags | __GFP_ZERO);\n}\n\n#endif // KVMALLOC_BACKPORT\n\n\nstatic inline int kutil_kthread_be_nice(void){\n    return cond_resched();\n}\n\nchar* resolve_reg_filepath(struct files_struct *files,\n                           struct file *file,\n                           char * buf);\n\nssize_t 
kutil_kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos);\nssize_t kutil_kernel_write_locked(struct file * file, const void *buf, size_t count);\n\n\nstatic inline ssize_t\nkutil_kernel_read(struct file *file, void *buf, size_t count, loff_t *pos){\n    ssize_t ret;\n#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)\n    ret = kernel_read(file, buf, count, pos);\n#else\n    mm_segment_t fs_save;\n\n    fs_save = get_fs();\n    set_fs(get_ds());\n    ret = vfs_read(file, buf, count, pos);\n    set_fs(fs_save);\n#endif\n    return ret;\n\n}\nssize_t kutil_kernel_read_locked(struct file *file, void *buf, size_t count);\nssize_t\nkutil_kernel_read_cachefriendly(struct file *file, void *buf, size_t count, loff_t *pos);\n\n\nstatic inline unsigned long kutil_get_first_arg_from_reg(struct pt_regs *regs){\n    // see 3c88ee194c288205733d248b51f0aca516ff4940\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)) && defined CONFIG_X86_64\n    return regs->di;\n#else\n    return regs_get_kernel_argument(regs, 0);\n#endif\n\n}\n\n\nstruct kutil_name_snapshot {\n    struct qstr name;\n    unsigned char inline_name[NAME_MAX + 1];\n};\n\nvoid kutil_take_name_snapshot(struct kutil_name_snapshot *, struct dentry *);\nvoid kutil_release_name_snapshot(struct kutil_name_snapshot*);\n\n\n// Replacement for the older memalloc_use_memcg, memalloc_unuse_memcg,\n// see also commit b87d8cefe43c7f22e8aa13919c1dfa2b4b4b4e01\n// Actually (I think) it should be possible to call\n// set_active_memcg from KERNEL_VERSION(5, 10, 0) onwards\n// _but_ int_active_memcg is not exported as of 5.14.\n// current->active_memcg was introduced by\n// d46eb14b735b11927d4bdc2d1854c311af19de6d\n#if defined CONFIG_MEMCG && \\\n       (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))\nstatic inline struct mem_cgroup *\nkutil_set_active_memcg(struct mem_cgroup *memcg)\n{\n    struct mem_cgroup *old;\n    if (unlikely(in_interrupt())) {\n        kutil_WARN_ONCE_IFN_DBG(1, \"Called 
in_interrupt...\");\n        return NULL;\n    }\n\n    old = current->active_memcg;\n    current->active_memcg = memcg;\n    return old;\n}\n#else\nstatic inline struct mem_cgroup *\nkutil_set_active_memcg(struct mem_cgroup *memcg)\n{\n    (void)(memcg);\n    return NULL;\n}\n#endif\n\n\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))\n\n#define kutil_BACKPORT_USE_MM\n\n// see commit f5678e7f2ac31c270334b936352f0ef2fe7dd2b3\nvoid kutil_use_mm(struct mm_struct*);\nvoid kutil_unuse_mm(struct mm_struct*);\n\n// see commit 37c54f9bd48663f7657a9178fe08c47e4f5b537b\n#define USE_MM_SET_FS_OFF\n\n#else\n\n#define kutil_use_mm kthread_use_mm\n#define kutil_unuse_mm kthread_unuse_mm\n\n#endif\n\nvoid kutil_kthread_exit(struct completion *comp, long code);\n\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \\\n    !defined (INIT_RCU_WORK)\n#define RCU_WORK_BACKPORT\n#endif\n\n#ifdef RCU_WORK_BACKPORT\n\nstruct rcu_work {\n    struct work_struct work;\n    struct rcu_head rcu;\n\n    /* target workqueue ->rcu uses to queue ->work */\n    struct workqueue_struct *wq;\n};\nstatic inline struct rcu_work *to_rcu_work(struct work_struct *work)\n{\n    return container_of(work, struct rcu_work, work);\n}\n\n#define INIT_RCU_WORK(_work, _func)\t\t\t\t\t\\\n    INIT_WORK(&(_work)->work, (_func))\n\nbool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);\n\n#endif // RCU_WORK_BACKPORT\n\n\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))\n#define GET_MEMCG_FROM_MM_BACKPORT\n\n#define get_mem_cgroup_from_mm _get_mem_cgroup_from_mm_backport\nstruct mem_cgroup *_get_mem_cgroup_from_mm_backport(struct mm_struct *mm);\n\n#define mem_cgroup_put _mem_cgroup_put_backport\nstatic inline void _mem_cgroup_put_backport(struct mem_cgroup *memcg) {\n    if (memcg)\n        css_put(&memcg->css);\n}\n#endif\n\n\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))\n#define vfs_fadvise _vfs_fadvise_dummy\nstatic inline int\n_vfs_fadvise_dummy(struct file *file, loff_t 
offset, loff_t len,\n                              int advice){\n    (void)file;\n    (void)(offset);\n    (void)(len);\n    (void)(advice);\n    return 0;\n}\n#endif\n\n\n// see commit 47291baa8ddfdae10663624ff0a15ab165952708\n// and        a6435940b62f81a1718bf2bd46a051379fc89b9d\nstatic inline int\nkutil_inode_permission(struct path* path, int mask){\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0)) && \\\n    !defined FS_ALLOW_IDMAP\n    return inode_permission(path->dentry->d_inode, mask);\n#else\n    return path_permission(path, mask);\n#endif\n}\n\n// see commit 077c212f0344a\n#if (LINUX_VERSION_CODE > KERNEL_VERSION(6, 7, 0))\nstatic inline time64_t kutil_get_mtime_sec(const struct inode *inode){\n    return inode_get_mtime_sec(inode);\n}\n#else\nstatic inline time64_t kutil_get_mtime_sec(const struct inode *inode){\n    return inode->i_mtime.tv_sec;\n}\n#endif\n\n\n// see f405df5de3170c00e5c54f8b7cf4766044a032ba\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))\n\n#define kuref_t             atomic_t\n#define kuref_sub_and_test  atomic_sub_and_test\n#define kuref_set           atomic_set\n#define kuref_inc_not_zero  atomic_inc_not_zero\n#define kuref_dec_and_test  atomic_dec_and_test\n\n#else\n\n#define kuref_t             refcount_t\n#define kuref_sub_and_test  refcount_sub_and_test\n#define kuref_set           refcount_set\n#define kuref_inc_not_zero  refcount_inc_not_zero\n#define kuref_dec_and_test  refcount_dec_and_test\n\n#endif\n\n"
  },
  {
    "path": "kernel/shournal_kio.c",
    "content": "\n#include \"shournal_kio.h\"\n#include <linux/slab.h>\n#include <linux/mm.h>\n#include <linux/file.h>\n\n#include \"kutil.h\"\n\n\nstruct kbuffered_file* shournal_kio_from_file(struct file* file, size_t bufsize){\n    char* buf;\n    struct kbuffered_file *buf_file;\n\n    buf = kvmalloc(bufsize, SHOURNALK_GFP | __GFP_RETRY_MAYFAIL);\n    if(! buf){\n        return ERR_PTR(-ENOMEM);\n    }\n    buf_file = kmalloc(sizeof(struct kbuffered_file), SHOURNALK_GFP);\n    if(!buf_file){\n        kvfree(buf);\n        return ERR_PTR(-ENOMEM);\n    }\n    buf_file->__file = file;\n    buf_file->__buf = buf;\n    buf_file->__pos = 0;\n    buf_file->__bufsize = bufsize;\n\n    return buf_file;\n}\n\nvoid shournal_kio_close(struct kbuffered_file* file){\n    shournal_kio_flush(file);\n    fput(file->__file);\n    kvfree(file->__buf);\n    kfree(file);\n}\n\n\nssize_t shournal_kio_write(struct kbuffered_file* file, const void *buf, size_t count){\n    ssize_t ret;\n    if(count > file->__bufsize){\n        // flush and write as a whole\n        if((ret = shournal_kio_flush(file)) < 0){\n            return ret;\n        }\n        return kutil_kernel_write_locked(file->__file, buf, count);\n    }\n\n    if(file->__pos + count > file->__bufsize){\n        if((ret = shournal_kio_flush(file)) < 0){\n            return ret;\n        }\n    }\n    memcpy(&file->__buf[file->__pos] , buf, count);\n    file->__pos += count;\n    return count;\n}\n\n/// @return The number of bytes written or neg. 
errno\nssize_t shournal_kio_flush(struct kbuffered_file* file){\n    ssize_t ret;\n    if(file->__pos == 0){\n        return 0;\n    }\n\n    ret = kutil_kernel_write_locked(\n                file->__file, file->__buf, file->__pos);\n    if(ret < 0){\n        return ret;\n    }\n    if(ret != file->__pos){\n        // maybe_todo: memmove the rest of our buffer to the beginning?\n        pr_devel(\"expected %ld bytes but wrote only %ld\\n\", file->__pos, ret);\n        return -EIO;\n    }\n    file->__pos = 0;\n    return ret;\n}\n\n"
  },
  {
    "path": "kernel/shournal_kio.h",
    "content": "\n#pragma once\n\n#include \"shournalk_global.h\"\n\n#include <linux/types.h>\n\n\nstruct file;\n\nstruct kbuffered_file {\n    struct file* __file;\n    char* __buf;\n    ssize_t __pos;\n    size_t __bufsize;\n};\n\n\nstruct kbuffered_file* shournal_kio_from_file(struct file* file, size_t bufsize);\nvoid shournal_kio_close(struct kbuffered_file* file);\n\nssize_t shournal_kio_write(struct kbuffered_file* file, const void *buf, size_t count);\nssize_t shournal_kio_flush(struct kbuffered_file* file);\n\n"
  },
  {
    "path": "kernel/shournalk_global.c",
    "content": "\n#include \"shournalk_global.h\"\n\n#include \"kpathtree.h\"\n\n\nstruct kpathtree g_dummy_pathtree;\n\nlong shournalk_global_constructor(void){\n    memset(&g_dummy_pathtree, 0, sizeof (struct kpathtree));\n    kpathtree_init(&g_dummy_pathtree);    \n    return 0;\n}\n\nvoid shournalk_global_destructor(void){\n    kpathtree_cleanup(&g_dummy_pathtree);\n}\n"
  },
  {
    "path": "kernel/shournalk_global.h",
    "content": "\n#pragma once\n// include module name in print_* messages:\n#ifdef pr_fmt\n#undef pr_fmt\n#endif\n#define pr_fmt(fmt) \"shournalk %s(): \" fmt, __func__\n\n#include <linux/gfp.h>\n\n#define SHOURNALK_GFP GFP_KERNEL_ACCOUNT | __GFP_NOWARN\n\nextern struct kpathtree g_dummy_pathtree;\n\nlong shournalk_global_constructor(void);\nvoid shournalk_global_destructor(void);\n"
  },
  {
    "path": "kernel/shournalk_main.c",
    "content": "#include \"shournalk_global.h\"\n\n#include <linux/kernel.h>\n#include <linux/module.h>\n#include <linux/init.h>\n#include <linux/delay.h>\n\nMODULE_LICENSE(\"GPL\");\nMODULE_AUTHOR(\"Tycho Kirchner\");\nMODULE_DESCRIPTION(\"Trace and collect metadata (path, hash, etc.) about \"\n                   \"file close-events recursively for specific pid's\");\nMODULE_VERSION(SHOURNAL_VERSION);\n\n\n#include \"event_handler.h\"\n#include \"event_handler.h\"\n#include \"shournalk_sysfs.h\"\n#include \"tracepoint_helper.h\"\n#include \"event_queue.h\"\n#include \"kutil.h\"\n\n#include \"shournalk_test.h\"\n\nstatic int __init shournalk_init(void)\n{\n    int ret;\n\n#ifdef DEBUG\n    if(! run_tests()){\n        return -EHOSTDOWN;\n    }\n#endif\n    if((ret = (int)shournalk_global_constructor()) != 0){\n        return ret;\n    }\n    if((ret = event_handler_constructor()) != 0)      goto error1;\n    if ((ret = tracepoint_helper_constructor()) != 0) goto error2;\n    if((ret = shournalk_sysfs_constructor()) != 0)    goto error3;\n\n    return 0;\n\nerror3:\n    tracepoint_helper_destructor();\nerror2:\n    event_handler_destructor();\nerror1:\n    shournalk_global_destructor();\n    return ret;\n}\n\nstatic void __exit shournalk_exit(void)\n{\n    // Be very careful about the order here.\n    shournalk_sysfs_destructor();    \n    tracepoint_helper_destructor();\n    event_handler_destructor();\n    shournalk_global_destructor();\n}\n\nmodule_init(shournalk_init)\nmodule_exit(shournalk_exit)\n\n\n"
  },
  {
    "path": "kernel/shournalk_sysfs.c",
    "content": "\n#include <linux/init.h>\n#include <linux/module.h>\n#include <linux/file.h>\n#include <linux/fs.h>\n#include <linux/fdtable.h>\n#include <linux/slab.h>\n#include <linux/uaccess.h>\n#include <linux/delay.h>\n\n#include \"shournalk_sysfs.h\"\n#include \"shournalk_user.h\"\n#include \"event_handler.h\"\n#include \"event_target.h\"\n#include \"kutil.h\"\n\n// Use «default attribute groups». Kernel v5.1-rc3,\n// aa30f47cf666111f6bbfd15f290a27e8a7b9d854 added default attribute groups\n// while v5.18-rc1, cdb4f26a63c391317e335e6e683a614358e70aeb\n// dropped legacy support. So we switch somewhere in the middle.\n#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 8, 0))\n#define SHOURNALK_USE_ATTR_GROUPS\n#endif\n\nstruct shournal_obj {\n    struct kobject kobj;\n};\n#define to_shournal_obj(x) container_of(x, struct shournal_obj, kobj)\n\nstruct shournal_attr {\n    struct attribute attr;\n    ssize_t (*show)(struct shournal_obj*, struct shournal_attr*, char*);\n    ssize_t (*store)(struct shournal_obj*, struct shournal_attr*, const char*, size_t);\n};\n#define to_shournal_attr(x) container_of(x, struct shournal_attr, attr)\n\n\n/// Entry point for all registered show-functions\nstatic ssize_t shournal_attr_show(struct kobject *kobj,\n                 struct attribute *attr,\n                 char *buf)\n{\n    struct shournal_attr *attribute;\n    struct shournal_obj *o;\n    attribute = to_shournal_attr(attr);\n    o = to_shournal_obj(kobj);\n    if (!attribute->show)\n        return -EIO;\n    return attribute->show(o, attribute, buf);\n}\n\n\n/// Entry point for all registered store-functions\nstatic ssize_t shournal_attr_store(struct kobject *kobj,\n                  struct attribute *attr,\n                  const char *buf, size_t len)\n{\n    struct shournal_attr *attribute;\n    struct shournal_obj *o;\n    attribute = to_shournal_attr(attr);\n    o = to_shournal_obj(kobj);\n    if (!attribute->store)\n        return -EIO;\n    return attribute->store(o, 
attribute, buf, len);\n}\n\nstatic struct sysfs_ops shournalk_ops = {\n    .show = shournal_attr_show,\n    .store = shournal_attr_store,\n};\n\n\nstatic ssize_t\n__mark(struct shournal_obj*, struct shournal_attr*, const char*, size_t);\nstatic ssize_t __show_version(struct shournal_obj *o __attribute__ ((unused)),\n                              struct shournal_attr* attr __attribute__ ((unused)),\n                              char *buf)\n{\n    return sprintf(buf, SHOURNAL_VERSION);\n}\n\nstatic struct shournal_attr attr_mark = __ATTR(mark, 0664, NULL, __mark);\nstatic struct shournal_attr attr_version = __ATTR(version, 0444, __show_version, NULL);\n\n\nstatic struct attribute *shournal_default_attrs[] = {\n    &attr_mark.attr,\n    &attr_version.attr,\n    NULL,\n};\n#ifdef SHOURNALK_USE_ATTR_GROUPS\nATTRIBUTE_GROUPS(shournal_default);\n#endif\n\n\nstatic void shournal_obj_release(struct kobject *kobj){\n    struct shournal_obj* o;\n    o = to_shournal_obj(kobj);\n    kfree(o);\n}\n\nstatic struct kobj_type shournal_kobj_ktype = {\n    .sysfs_ops\t= &shournalk_ops,\n#ifdef SHOURNALK_USE_ATTR_GROUPS\n    .default_groups = shournal_default_groups,\n#else\n    .default_attrs = (struct attribute **)&shournal_default_attrs,\n#endif\n    .release = shournal_obj_release,\n};\n\nstatic struct kset *shournal_kset;\nstatic struct shournal_obj *shournal_obj;\n\n/// Create kset and kobject and register our attribute function(s).\n/// kset *must* be created and set for kobject_uevent.\n/// See also: samples/kobject/kset-example.c\nint shournalk_sysfs_constructor(void){\n    int ret;\n\n    shournal_kset = kset_create_and_add(\"shournalk_root\", NULL, kernel_kobj);\n    if (!shournal_kset){\n        return -ENOMEM;\n    }\n\n    shournal_obj = kzalloc(sizeof(*shournal_obj), GFP_KERNEL);\n    if(! 
shournal_obj){\n        ret = -ENOMEM;\n        goto err_shournal_obj_alloc;\n    }\n    shournal_obj->kobj.kset = shournal_kset;\n\n    ret = kobject_init_and_add(&shournal_obj->kobj, &shournal_kobj_ktype, NULL,\n                               \"%s\", \"shournalk_ctrl\");\n    if (ret){\n        pr_err(\"kobject_init_and_add failed\");\n        goto err_shournal_obj_add;\n    }\n\n    if((ret=kobject_uevent(&shournal_obj->kobj, KOBJ_ADD) )) {\n        pr_warn(\"kobject_uevent failed\\n\");\n        goto err_shournal_obj_add;\n    }\n\n    return 0;\n\nerr_shournal_obj_add:\n    kobject_put(&shournal_obj->kobj);\nerr_shournal_obj_alloc:\n    kset_unregister(shournal_kset);\n\n    return ret;\n}\n\n\nvoid shournalk_sysfs_destructor(void){    \n    kobject_put(&shournal_obj->kobj);\n    kset_unregister(shournal_kset);\n}\n\n\n//////////////////////////////////////////////////////////////////////\n\n\nstatic long verify_hash_settings(struct shournalk_mark_struct * mark_struct){\n    if(mark_struct->settings.hash_max_count_reads == 0){\n        // hash is disabled. Be safe and also set chunksize to 0\n        mark_struct->settings.hash_chunksize = 0;\n        return 0;\n    }\n    // maybe_todo: remove the upper limit: partial hashing can handle this\n    // by digesting the max chunksize and *not* seeking afterwards.\n    if(mark_struct->settings.hash_chunksize < 8 ||\n       mark_struct->settings.hash_chunksize > PART_HASH_MAX_CHUNKSIZE){\n        pr_debug(\"Invalid hashsettings. Chunksize must be:\"\n                 \" between 8 and %d bytes\\n\", PART_HASH_MAX_CHUNKSIZE);\n        return -EINVAL;\n    }\n\n    if(mark_struct->settings.hash_max_count_reads < 1 ||\n       mark_struct->settings.hash_max_count_reads > 128){\n        pr_debug(\"Invalid hashsettings. 
Max count of reads \"\n                 \"must be between 1 and 128\\n\");\n        return -EINVAL;\n    }\n\n    return 0;\n}\n\nstatic long __handle_pid_add(struct shournalk_mark_struct* mark_struct){\n    pid_t pid = (pid_t)mark_struct->pid;\n    struct event_target* event_target;\n    // somewhat arbitrary limits\n    const int STORE_MAX_SIZE = 1024*1024 * 2;\n    const int STORE_MAX_FILECOUNT = 100;\n    long ret;\n    bool collect_exitcode = mark_struct->flags & SHOURNALK_MARK_COLLECT_EXITCODE;\n\n    if((ret = verify_hash_settings(mark_struct))){\n        return ret;\n    }\n    if(mark_struct->settings.r_store_max_size > STORE_MAX_SIZE){\n        pr_debug(\"r_store_max_size > %d\\n\", STORE_MAX_SIZE);\n        return -EINVAL;\n    }\n    if(mark_struct->settings.r_store_max_count_of_files > STORE_MAX_FILECOUNT){\n        pr_debug(\"r_store_max_count_of_files > %d\\n\", STORE_MAX_FILECOUNT);\n        return -EINVAL;\n    }\n\n    if(mark_struct->settings.w_max_event_count == 0 ||\n            mark_struct->settings.r_max_event_count == 0){\n        pr_debug(\"max_event_count(s) must not be zero\\n\");\n        return -EINVAL;\n    }\n\n    event_target = event_target_create(mark_struct);\n    if(IS_ERR(event_target)){\n        return PTR_ERR(event_target);\n    }\n    ret = event_handler_add_pid(event_target, pid, collect_exitcode);\n\n    event_target_put(event_target);\n    return ret;\n}\n\n\nstatic long __handle_pid_remove(const struct shournalk_mark_struct * mark_struct){\n    pid_t pid = (pid_t)mark_struct->pid;\n    return event_handler_remove_pid(pid);\n}\n\n/// @return length of passed string or neg. error\nstatic ssize_t __copy_path_from_user(char* buf, const char* __user src){\n   long str_len = strncpy_from_user(buf, src, PATH_MAX);\n   if(str_len <= 0){\n       pr_debug(\"strncpy_from_user returned %ld\\n\", str_len);\n       if(str_len < 0) return str_len;\n       return -EINVAL;\n   }\n   // we read something. 
While not a real sanity check, at least check for\n   // leading /\n   if(unlikely(buf[0] != '/')){\n       return -EINVAL;\n   }\n   return str_len;\n}\n\n/// If paths were not finalized yet, add param src to\n/// param pathtree\nstatic long __add_user_path(struct kpathtree* pathtree, const char* __user src){\n    long ret = 0;\n    char* path_tmp;\n\n    path_tmp = kzalloc(PATH_MAX, SHOURNALK_GFP);\n    if(!path_tmp) return -ENOMEM;\n\n    if((ret = __copy_path_from_user(path_tmp, src)) < 0){\n         goto out;\n    }\n    ret = kpathtree_add(pathtree, path_tmp, (int)ret);\n\nout:\n    kfree(path_tmp);\n    return ret;\n}\n\nstatic ssize_t\n__copy_file_extensions_from_user(char* buf, size_t buf_len, const char* __user src){\n   long str_len = strncpy_from_user(buf, src, buf_len);\n   if(str_len <= 0){\n       pr_debug(\"strncpy_from_user returned %ld\\n\", str_len);\n       if(str_len < 0) return str_len;\n       return -EINVAL;\n   }\n   // shortest possible allowed extension, including trailing /\n   // is e.g. 
o/\n   if(unlikely(str_len < 2)){\n       pr_debug(\"received extensions too short\\n\");\n       return -EINVAL;\n   }\n   return str_len;\n}\n\n/// If paths were not finalized yet, add param src to\n/// param pathtree\nstatic long __add_user_file_extensions(struct file_extensions* exts, const char* __user src){\n    long ret = 0;\n    char* ext_tmp;\n\n    ext_tmp = kzalloc(PAGE_SIZE, SHOURNALK_GFP);\n    if(!ext_tmp) return -ENOMEM;\n\n    if((ret = __copy_file_extensions_from_user(ext_tmp, PAGE_SIZE, src)) < 0){\n         goto out;\n    }\n    ret = file_extensions_add_multiple(exts, ext_tmp, (int)ret);\n\nout:\n    kfree(ext_tmp);\n    return ret;\n}\n\n\nstatic long __handle_mark_add(struct shournalk_mark_struct mark_struct){\n    long ret = -EINVAL;\n    struct event_target* t;\n    if(mark_struct.action == SHOURNALK_MARK_PID){\n        return __handle_pid_add(&mark_struct);\n    }\n\n    // for all other add-actions an existing event target is required\n    t = get_event_target_from_pid((pid_t)mark_struct.pid);\n    if(unlikely(IS_ERR(t))){\n        return PTR_ERR(t);\n    }\n\n    // locking applies to not committed targets only, so it is\n    // no problem to lock (a bit) early\n    mutex_lock(&t->lock);\n    if(unlikely(event_target_is_commited(t))){\n        pr_debug(\"invalid action %d - \"\n                 \"event-target is already committed\", mark_struct.action);\n        ret = -EBUSY;\n        goto unlock_put;\n    }\n\n    switch (mark_struct.action) {\n    case SHOURNALK_MARK_W_INCL:\n        ret = __add_user_path(&t->w_includes, mark_struct.data); break;\n    case SHOURNALK_MARK_W_EXCL:\n        ret = __add_user_path(&t->w_excludes, mark_struct.data); break;\n    case SHOURNALK_MARK_R_INCL:\n        ret = __add_user_path(&t->r_includes, mark_struct.data); break;\n    case SHOURNALK_MARK_R_EXCL:\n        ret = __add_user_path(&t->r_excludes, mark_struct.data); break;\n    case SHOURNALK_MARK_SCRIPT_INCL:\n        ret = 
__add_user_path(&t->script_includes, mark_struct.data); break;\n    case SHOURNALK_MARK_SCRIPT_EXCL:\n        ret = __add_user_path(&t->script_excludes, mark_struct.data); break;\n    case SHOURNALK_MARK_SCRIPT_EXTS:\n        ret = __add_user_file_extensions(&t->script_ext, mark_struct.data); break;\n    default:\n        ret = -EINVAL;\n    }\n\nunlock_put:\n    mutex_unlock(&t->lock);\n    event_target_put(t);\n    return ret;\n}\n\n\nstatic long __handle_mark_remove(struct shournalk_mark_struct mark_struct){\n    long ret;\n    switch (mark_struct.action) {\n    case SHOURNALK_MARK_PID:\n        ret = __handle_pid_remove(&mark_struct); break;\n    default:\n        ret = -EINVAL;\n    }\n    return ret;\n}\n\nstatic long __handle_commit(struct shournalk_mark_struct mark_struct){\n    long ret = 0;\n    struct event_target* event_target;\n    event_target = get_event_target_from_pid((pid_t)mark_struct.pid);\n    if(unlikely(IS_ERR(event_target))) {\n        return PTR_ERR(event_target);\n    }\n    mutex_lock(&event_target->lock);\n    if(likely(! 
event_target_is_commited(event_target))){\n         ret = event_target_commit(event_target);\n    } else {\n        pr_debug(\"__handle_commit: - \"\n                 \"event-target is already committed\");\n        ret = -EBUSY;\n    }\n    mutex_unlock(&event_target->lock);\n\n    event_target_put(event_target);\n    return ret;\n}\n\nstatic ssize_t __mark(struct shournal_obj* obj  __attribute__ ((unused)),\n                      struct shournal_attr* attr  __attribute__ ((unused)),\n                      const char* buf, size_t count){\n    const struct shournalk_mark_struct * s_mark;\n    ssize_t ret = 0;\n\n    if(count != sizeof (struct shournalk_mark_struct)){\n        return -EILSEQ;\n    }\n\n    s_mark = (const struct shournalk_mark_struct*) buf;\n\n    if(s_mark->flags & SHOURNALK_MARK_ADD)\n        ret = __handle_mark_add(*s_mark);\n    else if(s_mark->flags & SHOURNALK_MARK_REMOVE)\n        ret = __handle_mark_remove(*s_mark);\n    else if(s_mark->flags & SHOURNALK_MARK_COMMIT)\n        ret = __handle_commit(*s_mark);\n    else\n        ret = -EINVAL;\n\n    if(ret != 0){\n        WARN_ONCE(ret > 0, \"pos. error received\");\n        return ret;\n    }\n    return count;\n}\n\n\n\n"
  },
  {
    "path": "kernel/shournalk_sysfs.h",
    "content": "\n#pragma once\n#include \"shournalk_global.h\"\n\n\n\nint shournalk_sysfs_constructor(void);\n\nvoid shournalk_sysfs_destructor(void);\n"
  },
  {
    "path": "kernel/shournalk_test.c",
    "content": "\n\n#include \"shournalk_test.h\"\n\n#include \"kpathtree.h\"\n#include \"hash_table_str.h\"\n\n\n#define TEST_FAIL_ON(condition) ({\t\t\t\t\t\t\\\n    if(!!(condition)){  \\\n        pr_warn(\"test fail at %s:%d/%s\\n\", __FILE__, __LINE__, __func__); \\\n        goto test_err_out; \\\n    } \\\n})\n\n\nstatic bool test_kpathtree(void){\n    const char* p1 = \"/home/user1\";\n    const char* p2 = \"/home/user2\";\n    const char* p3 = \"/mnt/d\";\n    const char** current_ppath;\n\n    const char* subpaths[] = {\n        \"/home/user1/a\",\n        \"/home/user2/a\",\n        \"/home/user1/abc/defg\",\n        \"/home/user2/abc/defg__long_stuff.txt.tar.gz\",\n        \"/mnt/d/1\",\n        \"/mnt/d/2/abc/defg__long_stuff.txt.tar.gz\",\n        NULL\n    };\n\n    const char* nosubpaths[] = {\n        \"/home/user3/a\",\n        \"/home/user1\",\n        \"/home/user2\",\n        \"/home\",\n        \"/\",\n        \"/media/user1\",\n        \"/mnt/data\",\n        \"/mnt/e\",\n        \"/mnt/defghijk/lmnop\",\n        NULL\n    };\n\n    struct kpathtree* t = kpathtree_create();\n    TEST_FAIL_ON(IS_ERR(t));\n\n    // special case root node (all paths (except /) are subpaths\n    TEST_FAIL_ON(kpathtree_add(t, \"/\", 1));\n    for(current_ppath = subpaths; *current_ppath != NULL; current_ppath++){\n        // pr_info(\"current path: %s\\n\", *current_ppath);\n        TEST_FAIL_ON(! 
kpathtree_is_subpath(t, *current_ppath, (int)strlen(*current_ppath),0));\n    }\n    kpathtree_free(t);\n    t = kpathtree_create();\n    TEST_FAIL_ON(IS_ERR(t));\n\n    // before a path is added, all should fail:\n    for(current_ppath = subpaths; *current_ppath != NULL; current_ppath++){\n        TEST_FAIL_ON(kpathtree_is_subpath(t, *current_ppath, (int)strlen(*current_ppath),0));\n    }\n    for(current_ppath = nosubpaths; *current_ppath != NULL; current_ppath++){\n        TEST_FAIL_ON(kpathtree_is_subpath(t, *current_ppath, (int)strlen(*current_ppath),0));\n    }\n\n    TEST_FAIL_ON(kpathtree_add(t, p1, (int)strlen(p1)));\n    TEST_FAIL_ON(kpathtree_add(t, p2, (int)strlen(p2)));\n    TEST_FAIL_ON(kpathtree_add(t, p3, (int)strlen(p3)));\n\n    for(current_ppath = subpaths; *current_ppath != NULL; current_ppath++){\n        // pr_info(\"current path: %s\\n\", *current_ppath);\n        TEST_FAIL_ON(! kpathtree_is_subpath(t, *current_ppath, (int)strlen(*current_ppath),0));\n    }\n\n    for(current_ppath = nosubpaths; *current_ppath != NULL; current_ppath++){\n        // pr_info(\"current path: %s\\n\", *current_ppath);\n        TEST_FAIL_ON(kpathtree_is_subpath(t, *current_ppath, (int)strlen(*current_ppath),0));\n    }\n\n    kpathtree_free(t);\n    t = kpathtree_create();\n    TEST_FAIL_ON(IS_ERR(t));\n\n    // test allow_equals\n    TEST_FAIL_ON(kpathtree_add(t, p1, (int)strlen(p1)));\n    TEST_FAIL_ON(kpathtree_is_subpath(t, p1,(int)strlen(p1),0));\n    TEST_FAIL_ON(!kpathtree_is_subpath(t, p1,(int)strlen(p1),1));\n\n    kpathtree_free(t);\n    return true;\n\ntest_err_out:\n    if(! 
IS_ERR(t)) kpathtree_free(t);\n    return false;\n}\n\nstatic bool test_hash_table_str(void){\n    struct hash_entry_str* orig_e = NULL;\n    struct hash_entry_str* back_e = NULL;\n    const char* str1 = \"foobar\";\n    DEFINE_HASHTABLE(hash_table, 6);\n\n    orig_e = hash_entry_str_create(str1, strlen(str1));\n    TEST_FAIL_ON(IS_ERR_OR_NULL(orig_e));\n\n    hash_table_str_add(hash_table, orig_e);\n    hash_table_str_find(hash_table, back_e, str1, strlen(str1));\n    TEST_FAIL_ON( back_e == NULL);\n\n    hash_table_str_cleanup(hash_table);\n    // was just freed\n    orig_e = NULL;\n    back_e = NULL;\n    hash_table_str_find(hash_table, back_e, str1, strlen(str1));\n    TEST_FAIL_ON( back_e != NULL);\n\n    return true;\n\ntest_err_out:\n    if(! IS_ERR_OR_NULL(orig_e)) hash_entry_str_free(orig_e);\n    return false;\n}\n\n\nbool run_tests(void){\n    if(! test_kpathtree()) return false;\n    if(! test_hash_table_str()) return false;\n\n    pr_devel(\"Version %s - Tests successful!\\n\", SHOURNAL_VERSION);\n    return true;\n}\n"
  },
  {
    "path": "kernel/shournalk_test.h",
    "content": "\n#pragma once\n\n#include \"shournalk_global.h\"\n\n#include <linux/types.h>\n\nbool run_tests(void);\n"
  },
  {
    "path": "kernel/shournalk_user.h",
    "content": "/* Common header for kernel and userspace to control\n * shournalk via a sysfs interface\n */\n\n#pragma once\n\n#ifdef __KERNEL__\n\n#include <linux/types.h>\n\n#else\n\n#include <sys/types.h>\n#include <stdint.h>\n\n#endif // __KERNEL__\n\n\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#define SHOURNALK_INVALID_EXIT_CODE    -1\n\n/* flags */\n#define SHOURNALK_MARK_ADD              0x00000001\n#define SHOURNALK_MARK_REMOVE           0x00000002\n/* start collecting events after commit */\n#define SHOURNALK_MARK_COMMIT           0x00000004\n\n/* If this flag is set on MARK_PID, on process-tree end, return the\n   exitcode of the given pid in the run_result\n   (selected_exitcode). If the process did not end\n   (e.g. marked by another target) it is\n   SHOURNALK_INVALID_EXIT_CODE */\n#define SHOURNALK_MARK_COLLECT_EXITCODE 0x00000008\n\n\n/* actions */\n#define SHOURNALK_MARK_PID          100\n\n#define SHOURNALK_MARK_SCRIPT_INCL  111 /* include paths */\n#define SHOURNALK_MARK_SCRIPT_EXCL  112 /* exclude paths */\n#define SHOURNALK_MARK_SCRIPT_EXTS  113 /* file extensions */\n\n#define SHOURNALK_MARK_R_INCL       120\n#define SHOURNALK_MARK_R_EXCL       121\n\n#define SHOURNALK_MARK_W_INCL       130\n#define SHOURNALK_MARK_W_EXCL       131\n\n\nstruct shounalk_settings {\n    bool w_exclude_hidden;\n    uint64_t w_max_event_count; /* stop collecting after # written files */\n\n    bool r_only_writable;\n    bool r_exclude_hidden;\n    uint64_t r_max_event_count; /* stop collecting after # read files */\n\n    /* only store the content of a read file, if... 
*/\n    bool r_store_only_writable; /* ...user also has write permission */\n    uint32_t r_store_max_size; /* ...file-size is less or equal to max_size */\n    uint16_t r_store_max_count_of_files; /* ...not already collected all desired files */\n    bool r_store_exclude_hidden; /* ...it is not hidden */\n\n    unsigned hash_max_count_reads; /* set to 0 to disable hash */\n    unsigned hash_chunksize;\n};\n\n/// Mark specific paths of specific pid's (and their children)\n/// for observation\nstruct shournalk_mark_struct {\n    int pipe_fd; /* stats are written here after event processing finished */\n    int target_fd; /* close events are written to this binary file */\n    int flags; /* ADD, REMOVE, COMMIT */\n    int action; /* PID, SCRIPT_INCL/EXCL */\n    uint64_t pid;\n\n    struct shounalk_settings settings;\n    const void* data;\n};\n\n/// Close events are written to a binary file.\n/// If 'bytes' is nonzero, the next N bytes are\n/// the complete file content.\n/// Followed by that is either the full filepath\n/// or filename (null-terminated). In case of a filename, the event occurred\n/// within the same directory as the previous event of the\n/// given type (O_RDONLY, O_WRONLY)\nstruct shournalk_close_event {\n    int flags; /* One of O_RDONLY, O_WRONLY, O_RDWR */\n    uint64_t mtime; /* as unix timestamp */\n    uint64_t size;\n    uint64_t mode;\n    uint64_t hash;\n    bool hash_is_null;\n    size_t bytes; /* read that many file-content-bytes next. 
*/\n    /* filename as null-terminated cstring */\n};\n\n\n/// When the observation finishes, this struct is written to\n/// a pipe (created in user space) belonging to the notification\n/// group\nstruct shournalk_run_result {\n    int error_nb;\n    uint64_t w_event_count;      /* # of logged write-events */\n    uint64_t w_dropped_count;    /* # dropped exceeding max_event_count */\n    uint64_t r_event_count;      /* # of logged read-events */\n    uint64_t r_dropped_count;    /* # dropped exceeding max_event_count */\n    uint32_t stored_event_count; /* number of (read) files in event target file */\n    uint64_t lost_event_count;   /* if too many events occur, some may\n                                    be dropped for performance reasons. */\n    int selected_exitcode;       /* see SHOURNALK_MARK_COLLECT_EXITCODE */\n};\n\n\n#ifdef __cplusplus\n}\n#endif\n\n"
  },
  {
    "path": "kernel/tracepoint_helper.c",
    "content": "\n#include <linux/ftrace.h>\n#include <linux/tracepoint.h>\n#include <linux/version.h>\n#include <linux/file.h>\n#include <linux/slab.h>\n#include <linux/fs.h>\n#include <linux/task_work.h>\n\n\nstruct ftrace_ops;\n\n#include \"tracepoint_helper.h\"\n#include \"event_handler.h\"\n#include \"event_queue.h\"\n#include \"event_consumer.h\"\n#include \"kutil.h\"\n\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))\n#define USE_LEGACY_TRACEPOINTS\n#endif\n\n/// Type of kernel traces used in here.\nenum {SHOURNALK_TP_TRACE, SHOURNALK_TP_FTRACE};\n\n\nnoinline notrace static void\n__probe_sched_process_fork(void *data __attribute__ ((unused)),\n                         struct task_struct *parent,\n                         struct task_struct *child) {\n    event_handler_process_fork(parent, child);\n}\n\nnoinline notrace static void\n__probe_process_exit(unsigned long ip __attribute__ ((unused)),\n                     unsigned long parent_ip __attribute__ ((unused)),\n                     struct ftrace_ops *op __attribute__ ((unused)),\n                     struct pt_regs *regs){\n    struct task_struct *task;\n    task = (struct task_struct*)(\n                kutil_get_first_arg_from_reg(tracepoint_helper_get_ftrace_regs(regs)));\n    event_handler_process_exit(task);\n}\n\n/// Common structure to hold ftraces and tracepoints.\nstruct trace_entry {\n    char name[KSYM_NAME_LEN]; /* Don't use char* here! */\n    void *func; /* our own probe */\n    int tp_type; /* SHOURNALK_TP_TRACE, SHOURNALK_TP_FTRACE ... 
*/\n    unsigned long flags;\n    void *tracepoint; /* tracepoint in kernel */\n    bool init;\n    struct ftrace_ops __ftrace_ops;\n};\n\n\n// see commit a25d036d939a30623ff73ecad9c8b9116b02e823 :\n// ftrace: Reverse what the RECURSION flag means in the ftrace_ops\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))\n#define SHOURNAL_FTRACE_RECURSION_SAFE FTRACE_OPS_FL_RECURSION_SAFE\n#else\n#define SHOURNAL_FTRACE_RECURSION_SAFE 0\n#endif\n\n\n#define __DEFAULT_FTRACE_FLAGS \\\n            FTRACE_OPS_FL_SAVE_REGS | SHOURNAL_FTRACE_RECURSION_SAFE\n\n\nstatic struct trace_entry interests[] = {\n\n    {.name = \"sched_process_fork\", .func = (void*)__probe_sched_process_fork,\n     .tp_type = SHOURNALK_TP_TRACE },\n\n    // Look at kernel/exit.c::do_exit : we need to run our exit\n    // hook *after* the remaining open files were closed,\n    // otherwise we would lose those events. Thus, the tracepoint\n    // sched_process_exit is too early. sched_process_free on the other hand\n    // runs too late, probably because free is called after parent processes\n    // finished waiting but we want to allow waiting for them.\n    // perf_event_exit_task would probably be ideal, but cannot be traced, but\n    // cgroup_exit (or exit_notify) seems to be fine. 
Note that some of the\n    // functions called within\n    // do_exit are inlined (dependent on kernel version), thus cannot be ftraced.\n    {.name = \"cgroup_exit\", .func = (void*)__probe_process_exit,\n     .tp_type = SHOURNALK_TP_FTRACE,\n     .flags = __DEFAULT_FTRACE_FLAGS},\n\n    // cannot use inline function fsnotify_close.\n    // __close_fd is too highlevel (doesn't trigger on process exit,\n    // CLO_EXEC files and (probably dup(2), etc.).\n    // By using locks_remove_file instead of __fput, we avoid duplicate check\n    // #ifdef FMODE_OPENED\n    //             unlikely(!(file->f_mode & FMODE_OPENED)) ||\n    // #endif\n    {.name = \"locks_remove_file\",  .func = (void*)event_handler_fput,\n     .tp_type = SHOURNALK_TP_FTRACE,\n     .flags = __DEFAULT_FTRACE_FLAGS},\n\n};\n\n\nstatic void init_ftrace_entry(struct trace_entry* e){\n    e->__ftrace_ops.func = e->func;\n    e->__ftrace_ops.flags =e->flags;\n}\n\n\n#define FOR_EACH_INTEREST(i) \\\n    for (i = 0; i < sizeof(interests) / sizeof(struct trace_entry); i++)\n\nstatic void init_interests(void){\n    size_t i;\n    FOR_EACH_INTEREST(i) {\n        interests[i].init = 0;\n        if(interests[i].tp_type == SHOURNALK_TP_FTRACE){\n            init_ftrace_entry(&interests[i]);\n        }\n    }\n}\n\n\n#ifndef USE_LEGACY_TRACEPOINTS\n// Tracepoints are not exported. 
Look them up.\nstatic void lookup_tracepoints(struct tracepoint *tp,\n                               void *ignore __attribute__ ((unused)) ) {\n    size_t i;\n    FOR_EACH_INTEREST(i) {\n        // pr_info(\"tracepoint: %s\\n\", tp->name);\n        if (strcmp(interests[i].name, tp->name) == 0){\n            interests[i].tracepoint = tp;\n        }\n    }\n}\n#endif // USE_LEGACY_TRACEPOINTS\n\nstatic int __register_tracepoint(struct trace_entry * entry){\n    int ret;\n#ifndef USE_LEGACY_TRACEPOINTS\n    if (entry->tracepoint == NULL) {\n        return -ENXIO;\n    }\n\n    ret = tracepoint_probe_register(entry->tracepoint, entry->func, NULL);\n#else\n    ret = tracepoint_probe_register(entry->name, entry->func, NULL);\n#endif\n    entry->init = ret == 0;\n    return ret;\n}\n\nstatic int __unregister_tracepoint(struct trace_entry * entry){\n    int ret;\n#ifndef USE_LEGACY_TRACEPOINTS\n    ret = tracepoint_probe_unregister(entry->tracepoint, entry->func, NULL);\n#else\n    ret = tracepoint_probe_unregister(entry->name, entry->func, NULL);\n#endif\n    entry->init = 0;\n    return ret;\n}\n\n\nstatic int __register_ftrace(struct trace_entry * entry){\n    int ret;\n\n    if((ret = ftrace_set_filter(&entry->__ftrace_ops, entry->name, strlen(entry->name), 0)) < 0){\n        pr_warn(\"ftrace_set_filter %s failed\\n\", entry->name);\n        return ret;\n    }\n    ret = register_ftrace_function(&entry->__ftrace_ops);\n    entry->init = ret == 0;\n    return ret;\n}\n\nstatic int __unregister_ftrace(struct trace_entry * entry){\n    int ret;\n    ret = unregister_ftrace_function(&entry->__ftrace_ops);\n    entry->init = 0;\n    return ret;\n}\n\n\n\nstatic void cleanup(void) {\n    size_t i;\n    FOR_EACH_INTEREST(i) {\n        int ret = 0;\n        struct trace_entry* e = &interests[i];\n        if (! 
e->init) {\n            continue;\n        }\n\n        switch (e->tp_type) {\n        case SHOURNALK_TP_TRACE: ret = __unregister_tracepoint(e); break;\n        case SHOURNALK_TP_FTRACE: ret = __unregister_ftrace(e); break;\n        default: WARN_ON(1); break;\n        }\n        if(ret != 0){\n            pr_warn(\"failed to unregister trace %s\\n\", e->name);\n        }\n\n    }\n}\n\n\n\nint tracepoint_helper_constructor(void) {\n    size_t i;\n    int ret = 0;\n    init_interests();\n\n#ifndef USE_LEGACY_TRACEPOINTS\n    for_each_kernel_tracepoint(lookup_tracepoints, NULL);\n#endif\n\n    FOR_EACH_INTEREST(i) {\n        struct trace_entry* e = &interests[i];\n        switch (e->tp_type) {\n        case SHOURNALK_TP_TRACE: ret = __register_tracepoint(e);break;\n        case SHOURNALK_TP_FTRACE: ret =  __register_ftrace(e);break;\n        default: WARN_ON(1); ret = -1; break;\n        }\n        if(ret != 0){\n            pr_warn(\"Failed to register trace %s\\n\", e->name);\n            // Unload previously loaded\n            cleanup();\n            return ret;\n        }\n    }\n    return 0;\n}\n\n\nvoid tracepoint_helper_destructor(void) {\n    cleanup();\n}\n\n"
  },
  {
    "path": "kernel/tracepoint_helper.h",
    "content": "#pragma once\n#include \"shournalk_global.h\"\n#include <linux/version.h>\n#include <linux/ftrace.h>\n\nstruct pt_regs;\n\nint tracepoint_helper_constructor(void);\n\nvoid tracepoint_helper_destructor(void);\n\nstatic inline struct pt_regs*\ntracepoint_helper_get_ftrace_regs(struct pt_regs* regs){\n#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) && \\\n    ! defined arch_ftrace_get_regs\n    return regs;\n#else\n    // see commit d19ad0775dcd64b49eecf4fa79c17959ebfbd26b and\n    // 02a474ca266a47ea8f4d5a11f4ffa120f83730ad\n    // ftrace: Have the callbacks receive a struct ftrace_regs\n    // instead of pt_regs\n    struct ftrace_regs* fregs = (struct ftrace_regs*)regs;\n    return ftrace_get_regs(fregs);\n#endif\n}\n"
  },
  {
    "path": "kernel/xxhash_shournalk.c",
    "content": "/*\n * xxHash - Extremely Fast Hash algorithm\n * Copyright (C) 2012-2016, Yann Collet.\n *\n * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *   * Redistributions of source code must retain the above copyright\n *     notice, this list of conditions and the following disclaimer.\n *   * Redistributions in binary form must reproduce the above\n *     copyright notice, this list of conditions and the following disclaimer\n *     in the documentation and/or other materials provided with the\n *     distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n * This program is free software; you can redistribute it and/or modify it under\n * the terms of the GNU General Public License version 2 as published by the\n * Free Software Foundation. 
This program is dual-licensed; you may select\n * either version 2 of the GNU General Public License (\"GPL\") or BSD license\n * (\"BSD\").\n *\n * You can contact the author at:\n * - xxHash homepage: http://cyan4973.github.io/xxHash/\n * - xxHash source repository: https://github.com/Cyan4973/xxHash\n */\n\n// See commit 5f60d5f \"move asm/unaligned.h to linux/unaligned.h\"\n#if __has_include(<asm/unaligned.h>)\n#include <asm/unaligned.h>\n#else\n#include <linux/unaligned.h>\n#endif\n\n#include <linux/errno.h>\n#include <linux/compiler.h>\n#include <linux/kernel.h>\n#include <linux/module.h>\n#include <linux/string.h>\n\n\n#include \"xxhash_shournalk.h\"\n\n/*-*************************************\n * Macros\n **************************************/\n#define xxh_rotl32(x, r) ((x << r) | (x >> (32 - r)))\n#define xxh_rotl64(x, r) ((x << r) | (x >> (64 - r)))\n\n#ifdef __LITTLE_ENDIAN\n# define XXH_CPU_LITTLE_ENDIAN 1\n#else\n# define XXH_CPU_LITTLE_ENDIAN 0\n#endif\n\n/*-*************************************\n * Constants\n **************************************/\nstatic const uint32_t PRIME32_1 = 2654435761U;\nstatic const uint32_t PRIME32_2 = 2246822519U;\nstatic const uint32_t PRIME32_3 = 3266489917U;\nstatic const uint32_t PRIME32_4 =  668265263U;\nstatic const uint32_t PRIME32_5 =  374761393U;\n\nstatic const uint64_t PRIME64_1 = 11400714785074694791ULL;\nstatic const uint64_t PRIME64_2 = 14029467366897019727ULL;\nstatic const uint64_t PRIME64_3 =  1609587929392839161ULL;\nstatic const uint64_t PRIME64_4 =  9650029242287828579ULL;\nstatic const uint64_t PRIME64_5 =  2870177450012600261ULL;\n\n/*-**************************\n *  Utils\n ***************************/\nvoid xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src)\n{\n\tmemcpy(dst, src, sizeof(*dst));\n}\n\n\nvoid xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src)\n{\n\tmemcpy(dst, src, sizeof(*dst));\n}\n\n\n/*-***************************\n * Simple Hash 
Functions\n ****************************/\nstatic uint32_t xxh32_round(uint32_t seed, const uint32_t input)\n{\n\tseed += input * PRIME32_2;\n\tseed = xxh_rotl32(seed, 13);\n\tseed *= PRIME32_1;\n\treturn seed;\n}\n\nuint32_t xxh32(const void *input, const size_t len, const uint32_t seed)\n{\n\tconst uint8_t *p = (const uint8_t *)input;\n\tconst uint8_t *b_end = p + len;\n\tuint32_t h32;\n\n\tif (len >= 16) {\n\t\tconst uint8_t *const limit = b_end - 16;\n\t\tuint32_t v1 = seed + PRIME32_1 + PRIME32_2;\n\t\tuint32_t v2 = seed + PRIME32_2;\n\t\tuint32_t v3 = seed + 0;\n\t\tuint32_t v4 = seed - PRIME32_1;\n\n\t\tdo {\n\t\t\tv1 = xxh32_round(v1, get_unaligned_le32(p));\n\t\t\tp += 4;\n\t\t\tv2 = xxh32_round(v2, get_unaligned_le32(p));\n\t\t\tp += 4;\n\t\t\tv3 = xxh32_round(v3, get_unaligned_le32(p));\n\t\t\tp += 4;\n\t\t\tv4 = xxh32_round(v4, get_unaligned_le32(p));\n\t\t\tp += 4;\n\t\t} while (p <= limit);\n\n\t\th32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) +\n\t\t\txxh_rotl32(v3, 12) + xxh_rotl32(v4, 18);\n\t} else {\n\t\th32 = seed + PRIME32_5;\n\t}\n\n\th32 += (uint32_t)len;\n\n\twhile (p + 4 <= b_end) {\n\t\th32 += get_unaligned_le32(p) * PRIME32_3;\n\t\th32 = xxh_rotl32(h32, 17) * PRIME32_4;\n\t\tp += 4;\n\t}\n\n\twhile (p < b_end) {\n\t\th32 += (*p) * PRIME32_5;\n\t\th32 = xxh_rotl32(h32, 11) * PRIME32_1;\n\t\tp++;\n\t}\n\n\th32 ^= h32 >> 15;\n\th32 *= PRIME32_2;\n\th32 ^= h32 >> 13;\n\th32 *= PRIME32_3;\n\th32 ^= h32 >> 16;\n\n\treturn h32;\n}\n\n\nstatic uint64_t xxh64_round(uint64_t acc, const uint64_t input)\n{\n\tacc += input * PRIME64_2;\n\tacc = xxh_rotl64(acc, 31);\n\tacc *= PRIME64_1;\n\treturn acc;\n}\n\nstatic uint64_t xxh64_merge_round(uint64_t acc, uint64_t val)\n{\n\tval = xxh64_round(0, val);\n\tacc ^= val;\n\tacc = acc * PRIME64_1 + PRIME64_4;\n\treturn acc;\n}\n\nuint64_t xxh64(const void *input, const size_t len, const uint64_t seed)\n{\n\tconst uint8_t *p = (const uint8_t *)input;\n\tconst uint8_t *const b_end = p + len;\n\tuint64_t 
h64;\n\n\tif (len >= 32) {\n\t\tconst uint8_t *const limit = b_end - 32;\n\t\tuint64_t v1 = seed + PRIME64_1 + PRIME64_2;\n\t\tuint64_t v2 = seed + PRIME64_2;\n\t\tuint64_t v3 = seed + 0;\n\t\tuint64_t v4 = seed - PRIME64_1;\n\n\t\tdo {\n\t\t\tv1 = xxh64_round(v1, get_unaligned_le64(p));\n\t\t\tp += 8;\n\t\t\tv2 = xxh64_round(v2, get_unaligned_le64(p));\n\t\t\tp += 8;\n\t\t\tv3 = xxh64_round(v3, get_unaligned_le64(p));\n\t\t\tp += 8;\n\t\t\tv4 = xxh64_round(v4, get_unaligned_le64(p));\n\t\t\tp += 8;\n\t\t} while (p <= limit);\n\n\t\th64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +\n\t\t\txxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);\n\t\th64 = xxh64_merge_round(h64, v1);\n\t\th64 = xxh64_merge_round(h64, v2);\n\t\th64 = xxh64_merge_round(h64, v3);\n\t\th64 = xxh64_merge_round(h64, v4);\n\n\t} else {\n\t\th64  = seed + PRIME64_5;\n\t}\n\n\th64 += (uint64_t)len;\n\n\twhile (p + 8 <= b_end) {\n\t\tconst uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));\n\n\t\th64 ^= k1;\n\t\th64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;\n\t\tp += 8;\n\t}\n\n\tif (p + 4 <= b_end) {\n\t\th64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;\n\t\th64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;\n\t\tp += 4;\n\t}\n\n\twhile (p < b_end) {\n\t\th64 ^= (*p) * PRIME64_5;\n\t\th64 = xxh_rotl64(h64, 11) * PRIME64_1;\n\t\tp++;\n\t}\n\n\th64 ^= h64 >> 33;\n\th64 *= PRIME64_2;\n\th64 ^= h64 >> 29;\n\th64 *= PRIME64_3;\n\th64 ^= h64 >> 32;\n\n\treturn h64;\n}\n\n\n/*-**************************************************\n * Advanced Hash Functions\n ***************************************************/\nvoid xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed)\n{\n\t/* use a local state for memcpy() to avoid strict-aliasing warnings */\n\tstruct xxh32_state state;\n\n\tmemset(&state, 0, sizeof(state));\n\tstate.v1 = seed + PRIME32_1 + PRIME32_2;\n\tstate.v2 = seed + PRIME32_2;\n\tstate.v3 = seed + 0;\n\tstate.v4 = seed - PRIME32_1;\n\tmemcpy(statePtr, &state, sizeof(state));\n}\n\n\nvoid 
xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)\n{\n\t/* use a local state for memcpy() to avoid strict-aliasing warnings */\n\tstruct xxh64_state state;\n\n\tmemset(&state, 0, sizeof(state));\n\tstate.v1 = seed + PRIME64_1 + PRIME64_2;\n\tstate.v2 = seed + PRIME64_2;\n\tstate.v3 = seed + 0;\n\tstate.v4 = seed - PRIME64_1;\n\tmemcpy(statePtr, &state, sizeof(state));\n}\n\n\nint xxh32_update(struct xxh32_state *state, const void *input, const size_t len)\n{\n\tconst uint8_t *p = (const uint8_t *)input;\n\tconst uint8_t *const b_end = p + len;\n\n\tif (input == NULL)\n\t\treturn -EINVAL;\n\n\tstate->total_len_32 += (uint32_t)len;\n\tstate->large_len |= (len >= 16) | (state->total_len_32 >= 16);\n\n\tif (state->memsize + len < 16) { /* fill in tmp buffer */\n\t\tmemcpy((uint8_t *)(state->mem32) + state->memsize, input, len);\n\t\tstate->memsize += (uint32_t)len;\n\t\treturn 0;\n\t}\n\n\tif (state->memsize) { /* some data left from previous update */\n\t\tconst uint32_t *p32 = state->mem32;\n\n\t\tmemcpy((uint8_t *)(state->mem32) + state->memsize, input,\n\t\t\t16 - state->memsize);\n\n\t\tstate->v1 = xxh32_round(state->v1, get_unaligned_le32(p32));\n\t\tp32++;\n\t\tstate->v2 = xxh32_round(state->v2, get_unaligned_le32(p32));\n\t\tp32++;\n\t\tstate->v3 = xxh32_round(state->v3, get_unaligned_le32(p32));\n\t\tp32++;\n\t\tstate->v4 = xxh32_round(state->v4, get_unaligned_le32(p32));\n\t\tp32++;\n\n\t\tp += 16-state->memsize;\n\t\tstate->memsize = 0;\n\t}\n\n\tif (p <= b_end - 16) {\n\t\tconst uint8_t *const limit = b_end - 16;\n\t\tuint32_t v1 = state->v1;\n\t\tuint32_t v2 = state->v2;\n\t\tuint32_t v3 = state->v3;\n\t\tuint32_t v4 = state->v4;\n\n\t\tdo {\n\t\t\tv1 = xxh32_round(v1, get_unaligned_le32(p));\n\t\t\tp += 4;\n\t\t\tv2 = xxh32_round(v2, get_unaligned_le32(p));\n\t\t\tp += 4;\n\t\t\tv3 = xxh32_round(v3, get_unaligned_le32(p));\n\t\t\tp += 4;\n\t\t\tv4 = xxh32_round(v4, get_unaligned_le32(p));\n\t\t\tp += 4;\n\t\t} while (p <= 
limit);\n\n\t\tstate->v1 = v1;\n\t\tstate->v2 = v2;\n\t\tstate->v3 = v3;\n\t\tstate->v4 = v4;\n\t}\n\n\tif (p < b_end) {\n\t\tmemcpy(state->mem32, p, (size_t)(b_end-p));\n\t\tstate->memsize = (uint32_t)(b_end-p);\n\t}\n\n\treturn 0;\n}\n\n\nuint32_t xxh32_digest(const struct xxh32_state *state)\n{\n\tconst uint8_t *p = (const uint8_t *)state->mem32;\n\tconst uint8_t *const b_end = (const uint8_t *)(state->mem32) +\n\t\tstate->memsize;\n\tuint32_t h32;\n\n\tif (state->large_len) {\n\t\th32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) +\n\t\t\txxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18);\n\t} else {\n\t\th32 = state->v3 /* == seed */ + PRIME32_5;\n\t}\n\n\th32 += state->total_len_32;\n\n\twhile (p + 4 <= b_end) {\n\t\th32 += get_unaligned_le32(p) * PRIME32_3;\n\t\th32 = xxh_rotl32(h32, 17) * PRIME32_4;\n\t\tp += 4;\n\t}\n\n\twhile (p < b_end) {\n\t\th32 += (*p) * PRIME32_5;\n\t\th32 = xxh_rotl32(h32, 11) * PRIME32_1;\n\t\tp++;\n\t}\n\n\th32 ^= h32 >> 15;\n\th32 *= PRIME32_2;\n\th32 ^= h32 >> 13;\n\th32 *= PRIME32_3;\n\th32 ^= h32 >> 16;\n\n\treturn h32;\n}\n\n\nint xxh64_update(struct xxh64_state *state, const void *input, const size_t len)\n{\n\tconst uint8_t *p = (const uint8_t *)input;\n\tconst uint8_t *const b_end = p + len;\n\n\tif (input == NULL)\n\t\treturn -EINVAL;\n\n\tstate->total_len += len;\n\n\tif (state->memsize + len < 32) { /* fill in tmp buffer */\n\t\tmemcpy(((uint8_t *)state->mem64) + state->memsize, input, len);\n\t\tstate->memsize += (uint32_t)len;\n\t\treturn 0;\n\t}\n\n\tif (state->memsize) { /* tmp buffer is full */\n\t\tuint64_t *p64 = state->mem64;\n\n\t\tmemcpy(((uint8_t *)p64) + state->memsize, input,\n\t\t\t32 - state->memsize);\n\n\t\tstate->v1 = xxh64_round(state->v1, get_unaligned_le64(p64));\n\t\tp64++;\n\t\tstate->v2 = xxh64_round(state->v2, get_unaligned_le64(p64));\n\t\tp64++;\n\t\tstate->v3 = xxh64_round(state->v3, get_unaligned_le64(p64));\n\t\tp64++;\n\t\tstate->v4 = xxh64_round(state->v4, 
get_unaligned_le64(p64));\n\n\t\tp += 32 - state->memsize;\n\t\tstate->memsize = 0;\n\t}\n\n\tif (p + 32 <= b_end) {\n\t\tconst uint8_t *const limit = b_end - 32;\n\t\tuint64_t v1 = state->v1;\n\t\tuint64_t v2 = state->v2;\n\t\tuint64_t v3 = state->v3;\n\t\tuint64_t v4 = state->v4;\n\n\t\tdo {\n\t\t\tv1 = xxh64_round(v1, get_unaligned_le64(p));\n\t\t\tp += 8;\n\t\t\tv2 = xxh64_round(v2, get_unaligned_le64(p));\n\t\t\tp += 8;\n\t\t\tv3 = xxh64_round(v3, get_unaligned_le64(p));\n\t\t\tp += 8;\n\t\t\tv4 = xxh64_round(v4, get_unaligned_le64(p));\n\t\t\tp += 8;\n\t\t} while (p <= limit);\n\n\t\tstate->v1 = v1;\n\t\tstate->v2 = v2;\n\t\tstate->v3 = v3;\n\t\tstate->v4 = v4;\n\t}\n\n\tif (p < b_end) {\n\t\tmemcpy(state->mem64, p, (size_t)(b_end-p));\n\t\tstate->memsize = (uint32_t)(b_end - p);\n\t}\n\n\treturn 0;\n}\n\n\nuint64_t xxh64_digest(const struct xxh64_state *state)\n{\n\tconst uint8_t *p = (const uint8_t *)state->mem64;\n\tconst uint8_t *const b_end = (const uint8_t *)state->mem64 +\n\t\tstate->memsize;\n\tuint64_t h64;\n\n\tif (state->total_len >= 32) {\n\t\tconst uint64_t v1 = state->v1;\n\t\tconst uint64_t v2 = state->v2;\n\t\tconst uint64_t v3 = state->v3;\n\t\tconst uint64_t v4 = state->v4;\n\n\t\th64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +\n\t\t\txxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);\n\t\th64 = xxh64_merge_round(h64, v1);\n\t\th64 = xxh64_merge_round(h64, v2);\n\t\th64 = xxh64_merge_round(h64, v3);\n\t\th64 = xxh64_merge_round(h64, v4);\n\t} else {\n\t\th64  = state->v3 + PRIME64_5;\n\t}\n\n\th64 += (uint64_t)state->total_len;\n\n\twhile (p + 8 <= b_end) {\n\t\tconst uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));\n\n\t\th64 ^= k1;\n\t\th64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;\n\t\tp += 8;\n\t}\n\n\tif (p + 4 <= b_end) {\n\t\th64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;\n\t\th64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;\n\t\tp += 4;\n\t}\n\n\twhile (p < b_end) {\n\t\th64 ^= (*p) * PRIME64_5;\n\t\th64 = xxh_rotl64(h64, 11) * 
PRIME64_1;\n\t\tp++;\n\t}\n\n\th64 ^= h64 >> 33;\n\th64 *= PRIME64_2;\n\th64 ^= h64 >> 29;\n\th64 *= PRIME64_3;\n\th64 ^= h64 >> 32;\n\n\treturn h64;\n}\n\n\nMODULE_LICENSE(\"Dual BSD/GPL\");\nMODULE_DESCRIPTION(\"xxHash\");\n"
  },
  {
    "path": "kernel/xxhash_shournalk.h",
    "content": "/*\n * xxHash - Extremely Fast Hash algorithm\n * Copyright (C) 2012-2016, Yann Collet.\n *\n * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n *   * Redistributions of source code must retain the above copyright\n *     notice, this list of conditions and the following disclaimer.\n *   * Redistributions in binary form must reproduce the above\n *     copyright notice, this list of conditions and the following disclaimer\n *     in the documentation and/or other materials provided with the\n *     distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n * This program is free software; you can redistribute it and/or modify it under\n * the terms of the GNU General Public License version 2 as published by the\n * Free Software Foundation. 
This program is dual-licensed; you may select\n * either version 2 of the GNU General Public License (\"GPL\") or BSD license\n * (\"BSD\").\n *\n * You can contact the author at:\n * - xxHash homepage: http://cyan4973.github.io/xxHash/\n * - xxHash source repository: https://github.com/Cyan4973/xxHash\n */\n\n/*\n * Notice extracted from xxHash homepage:\n *\n * xxHash is an extremely fast Hash algorithm, running at RAM speed limits.\n * It also successfully passes all tests from the SMHasher suite.\n *\n * Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2\n * Duo @3GHz)\n *\n * Name            Speed       Q.Score   Author\n * xxHash          5.4 GB/s     10\n * CrapWow         3.2 GB/s      2       Andrew\n * MumurHash 3a    2.7 GB/s     10       Austin Appleby\n * SpookyHash      2.0 GB/s     10       Bob Jenkins\n * SBox            1.4 GB/s      9       Bret Mulvey\n * Lookup3         1.2 GB/s      9       Bob Jenkins\n * SuperFastHash   1.2 GB/s      1       Paul Hsieh\n * CityHash64      1.05 GB/s    10       Pike & Alakuijala\n * FNV             0.55 GB/s     5       Fowler, Noll, Vo\n * CRC32           0.43 GB/s     9\n * MD5-32          0.33 GB/s    10       Ronald L. Rivest\n * SHA1-32         0.28 GB/s    10\n *\n * Q.Score is a measure of quality of the hash function.\n * It depends on successfully passing SMHasher test set.\n * 10 is a perfect score.\n *\n * A 64-bits version, named xxh64 offers much better speed,\n * but for 64-bits applications only.\n * Name     Speed on 64 bits    Speed on 32 bits\n * xxh64       13.8 GB/s            1.9 GB/s\n * xxh32        6.8 GB/s            6.0 GB/s\n */\n\n////////////////////////////\n\n/* Tycho Kirchner, Oct 2020 - tychokirchner@mail.de\n * Old kernels do not provide xxhash. 
Further, at least on Debian Buster and\n * Ubuntu 18.04, xxhash's symbols cannot be found on module insert, although the symbols\n * are exported (and also do not appear in /proc/kallsyms, maybe because they are\n * nowhere used in the kernel?). Therefor, I copied the source from Linux 4.19.0\n * in here, renamed it and removed the EXPORT_SYMBOL-calls.\n */\n\n#pragma once\n\n#include <linux/types.h>\n\n/*-****************************\n * Simple Hash Functions\n *****************************/\n\n/**\n * xxh32() - calculate the 32-bit hash of the input with a given seed.\n *\n * @input:  The data to hash.\n * @length: The length of the data to hash.\n * @seed:   The seed can be used to alter the result predictably.\n *\n * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s\n *\n * Return:  The 32-bit hash of the data.\n */\nuint32_t xxh32(const void *input, size_t length, uint32_t seed);\n\n/**\n * xxh64() - calculate the 64-bit hash of the input with a given seed.\n *\n * @input:  The data to hash.\n * @length: The length of the data to hash.\n * @seed:   The seed can be used to alter the result predictably.\n *\n * This function runs 2x faster on 64-bit systems, but slower on 32-bit systems.\n *\n * Return:  The 64-bit hash of the data.\n */\nuint64_t xxh64(const void *input, size_t length, uint64_t seed);\n\n/*-****************************\n * Streaming Hash Functions\n *****************************/\n\n/*\n * These definitions are only meant to allow allocation of XXH state\n * statically, on stack, or in a struct for example.\n * Do not use members directly.\n */\n\n/**\n * struct xxh32_state - private xxh32 state, do not use members directly\n */\nstruct xxh32_state {\n\tuint32_t total_len_32;\n\tuint32_t large_len;\n\tuint32_t v1;\n\tuint32_t v2;\n\tuint32_t v3;\n\tuint32_t v4;\n\tuint32_t mem32[4];\n\tuint32_t memsize;\n};\n\n/**\n * struct xxh32_state - private xxh64 state, do not use members directly\n */\nstruct xxh64_state 
{\n\tuint64_t total_len;\n\tuint64_t v1;\n\tuint64_t v2;\n\tuint64_t v3;\n\tuint64_t v4;\n\tuint64_t mem64[4];\n\tuint32_t memsize;\n};\n\n/**\n * xxh32_reset() - reset the xxh32 state to start a new hashing operation\n *\n * @state: The xxh32 state to reset.\n * @seed:  Initialize the hash state with this seed.\n *\n * Call this function on any xxh32_state to prepare for a new hashing operation.\n */\nvoid xxh32_reset(struct xxh32_state *state, uint32_t seed);\n\n/**\n * xxh32_update() - hash the data given and update the xxh32 state\n *\n * @state:  The xxh32 state to update.\n * @input:  The data to hash.\n * @length: The length of the data to hash.\n *\n * After calling xxh32_reset() call xxh32_update() as many times as necessary.\n *\n * Return:  Zero on success, otherwise an error code.\n */\nint xxh32_update(struct xxh32_state *state, const void *input, size_t length);\n\n/**\n * xxh32_digest() - produce the current xxh32 hash\n *\n * @state: Produce the current xxh32 hash of this state.\n *\n * A hash value can be produced at any time. 
It is still possible to continue\n * inserting input into the hash state after a call to xxh32_digest(), and\n * generate new hashes later on, by calling xxh32_digest() again.\n *\n * Return: The xxh32 hash stored in the state.\n */\nuint32_t xxh32_digest(const struct xxh32_state *state);\n\n/**\n * xxh64_reset() - reset the xxh64 state to start a new hashing operation\n *\n * @state: The xxh64 state to reset.\n * @seed:  Initialize the hash state with this seed.\n */\nvoid xxh64_reset(struct xxh64_state *state, uint64_t seed);\n\n/**\n * xxh64_update() - hash the data given and update the xxh64 state\n * @state:  The xxh64 state to update.\n * @input:  The data to hash.\n * @length: The length of the data to hash.\n *\n * After calling xxh64_reset() call xxh64_update() as many times as necessary.\n *\n * Return:  Zero on success, otherwise an error code.\n */\nint xxh64_update(struct xxh64_state *state, const void *input, size_t length);\n\n/**\n * xxh64_digest() - produce the current xxh64 hash\n *\n * @state: Produce the current xxh64 hash of this state.\n *\n * A hash value can be produced at any time. It is still possible to continue\n * inserting input into the hash state after a call to xxh64_digest(), and\n * generate new hashes later on, by calling xxh64_digest() again.\n *\n * Return: The xxh64 hash stored in the state.\n */\nuint64_t xxh64_digest(const struct xxh64_state *state);\n\n/*-**************************\n * Utils\n ***************************/\n\n/**\n * xxh32_copy_state() - copy the source state into the destination state\n *\n * @src: The source xxh32 state.\n * @dst: The destination xxh32 state.\n */\nvoid xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src);\n\n/**\n * xxh64_copy_state() - copy the source state into the destination state\n *\n * @src: The source xxh64 state.\n * @dst: The destination xxh64 state.\n */\nvoid xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src);\n"
  },
  {
    "path": "shell-integration-scripts/CMakeLists.txt",
    "content": "\nconfigure_file( _source_me_generic.sh _source_me_generic.sh @ONLY)\n\n# Merge script files into _integration_ko.sh\n# Write to temporary so the real target only gets updated\n# if its content has changed\nset(integration_ko_tmp \"${CMAKE_CURRENT_BINARY_DIR}/integration_ko.sh_tmp\")\nfile(WRITE ${integration_ko_tmp} \"\")\nforeach(f\n        integration_main.sh.in util.sh integration_ko.sh)\n  append_to_file(${integration_ko_tmp} ${f})\nendforeach()\n# Copy the temporary file to the final location\nconfigure_file(${integration_ko_tmp} _integration_ko.sh @ONLY)\n\n# Merge script files into _integration_fan.sh\nset(integration_fan_tmp \"${CMAKE_CURRENT_BINARY_DIR}/integration_fan.sh_tmp\")\nfile(WRITE ${integration_fan_tmp} \"\")\nforeach(f\n        integration_main.sh.in util.sh integration_fan.sh)\n  append_to_file(${integration_fan_tmp} ${f})\nendforeach()\nconfigure_file(${integration_fan_tmp} _integration_fan.sh @ONLY)\n\n\nadd_custom_target(target_SOURCE_SHELLSCRIPTS ALL\n  COMMAND ${CMAKE_COMMAND} -E create_symlink _source_me_generic.sh SOURCE_ME.bash\n  COMMAND ${CMAKE_COMMAND} -E create_symlink _source_me_generic.sh SOURCE_ME.zsh\n\n  # keep those for backwards compatibility (SOURCE_ME.$shell should be used\n  # in general as it automatically selects the correct backend)\n  COMMAND ${CMAKE_COMMAND} -E create_symlink _integration_ko.sh integration_ko.bash\n  COMMAND ${CMAKE_COMMAND} -E create_symlink _integration_ko.sh integration_ko.zsh\n\n  COMMAND ${CMAKE_COMMAND} -E create_symlink _integration_fan.sh integration_fan.bash\n  COMMAND ${CMAKE_COMMAND} -E create_symlink _integration_fan.sh integration_fan.zsh\n)\n\ninstall(FILES\n    \"${CMAKE_CURRENT_BINARY_DIR}/_source_me_generic.sh\"\n    \"${CMAKE_CURRENT_BINARY_DIR}/SOURCE_ME.bash\"\n    \"${CMAKE_CURRENT_BINARY_DIR}/SOURCE_ME.zsh\"\n\n    \"${CMAKE_CURRENT_BINARY_DIR}/_integration_ko.sh\"\n    \"${CMAKE_CURRENT_BINARY_DIR}/integration_ko.bash\"\n    
\"${CMAKE_CURRENT_BINARY_DIR}/integration_ko.zsh\"\n\n    \"${CMAKE_CURRENT_BINARY_DIR}/_integration_fan.sh\"\n    \"${CMAKE_CURRENT_BINARY_DIR}/integration_fan.bash\"\n    \"${CMAKE_CURRENT_BINARY_DIR}/integration_fan.zsh\"\n\n    DESTINATION ${shournal_install_dir_script}\n)\n\n"
  },
  {
    "path": "shell-integration-scripts/_source_me_generic.sh",
    "content": "# Select which shell-integration to be sourced based on config\n# and availability.\n# The backend is chosen in the following order:\n# • variable SHOURNAL_BACKEND\n# • config file at shournal's user cfg-dir\n# • config file at /etc...\n# Else default to the ko-backend or, if not found, the fanotify-backend.\n\n__shournal_eprint(){\n    >&2 printf \"shournal-backend-selection: $*\\n\"\n}\n\n__shournal_cmd_exists(){\n     [ -x \"$(command -v \"$1\")\" ]\n}\n\n__shournal_select_backend(){\n    local scriptname=\"$1\"\n    local this_shell=\"$2\"\n    local backend_name_ko=\"integration_ko.$this_shell\"\n    local backend_name_fan=\"integration_fan.$this_shell\"\n    local backend_name_selected=\"\"\n    local backend_origin=\"UNKNOWN\"\n\n\n    if [ -n \"${SHOURNAL_BACKEND+x}\" ]; then\n        backend_name_selected=\"$SHOURNAL_BACKEND\"\n        backend_origin=\"variable SHOURNAL_BACKEND\"\n    else\n        for p in \"$HOME/.config/shournal/backend\" \\\n                 \"/etc/shournal.d/backend\"; do\n            if test -f \"$p\"; then\n                read -r backend_name_selected < \"$p\"\n                backend_origin=\"$p\"\n                break\n            fi\n        done\n    fi\n\n    case \"$backend_name_selected\" in\n    '')\n        : ;; # use fallback below\n    ko)\n        backend_name_selected=\"$backend_name_ko\";;\n    fanotify)\n        backend_name_selected=\"$backend_name_fan\";;\n    *)\n        __shournal_eprint \"Unsupported backend '$backend_name_selected' set in\" \\\n                          \"'$backend_origin'. 
Supported options: [fanotify, ko].\" \\\n                          \"Using defaults...\"\n        backend_name_selected=\"\" ;;\n    esac\n\n    if [ -z \"$backend_name_selected\" ]; then\n        if __shournal_cmd_exists 'shournal-run'; then\n            backend_name_selected=\"$backend_name_ko\"\n        elif __shournal_cmd_exists 'shournal-run-fanotify'; then\n            backend_name_selected=\"$backend_name_fan\"\n        else\n            __shournal_eprint \"Error: commands shournal-run and \" \\\n                              \"shournal-run-fanotify were not found in PATH.\"\n            return 1\n        fi\n    fi\n    . \"$(dirname -- \"$scriptname\")/$backend_name_selected\"\n}\n\n__shournal_select_backend_return=0\nif [ -n \"${BASH_VERSION+x}\" ]; then\n    __shournal_select_backend \"$BASH_SOURCE\" bash ||\n        __shournal_select_backend_return=$?\nelif [ -n \"${ZSH_VERSION+x}\" ]; then\n    # This has to be in global scope, $0 is different within function.\n    __shournal_select_backend \"$0\" zsh ||\n        __shournal_select_backend_return=$?\nelse\n    __shournal_eprint \"called from unsupported shell [currently only bash is supported]\"\n    __shournal_select_backend_return=1\nfi\n\nunset __shournal_eprint\nunset __shournal_cmd_exists\nunset __shournal_select_backend\n\nreturn $__shournal_select_backend_return\n"
  },
  {
    "path": "shell-integration-scripts/integration_fan.sh",
    "content": "\n# shell-integration for shournal - fanotify backend.\n\n\n_shournal_run_backend='shournal-run-fanotify'\n\n\n_shournal_enable(){\n    local ret=0\n    if [ -n \"${_shournal_is_running+x}\" ] ; then\n        _shournal_warn_on '! _libshournal_is_loaded'\n        _shournal_debug \"_shournal_enable: current session is already observed\"\n        return 0\n    fi\n\n    if [ -n \"${_shournal_shell_exec_string+x}\" ]; then\n        _shournal_handle_exec_string || return $?\n    fi\n\n    # This shell was _not_ invoked with the sh -c '...' option,\n    # so clear our flag unconditionally:\n    unset _shournal_parent_launched_us_noninteractive\n\n    if ! _libshournal_is_loaded ; then\n        if [ -n \"${_shournal_enable_just_called+x}\" ]; then\n            _shournal_warn \"Something went wrong during preloading of \" \\\n                           \"libshournal-shellwatch.so, the shell integration \" \\\n                           \"is _not_ enabled.\"\n            unset _shournal_enable_just_called\n            return 1\n        fi\n        _shournal_interactive_exec_allowed || return $?\n        _shournal_exec_ldpreloaded_shell\n        # only get here on error\n        return 1\n    fi\n    _shournal_debug \"_shournal_enable: about to enable...\"\n    unset _shournal_enable_just_called\n\n    if [ -n \"${LD_PRELOAD+x}\" ] ; then\n        # note: processes launched by this shell will not be observed by the preloaded library!\n        LD_PRELOAD=${LD_PRELOAD//:$SHOURNAL_PATH_LIB_SHELL_INTEGRATION/}\n    fi\n\n    _libshournal_enable || return 1\n    _shournal_remove_prompts\n    _shournal_add_prompts\n    _shournal_is_running=true\n    return 0\n}\n\n\n_shournal_disable(){\n    local exitcode=$1\n\n    _shournal_debug \"_shournal_disable: about to disable\" \\\n                    \"with exitcode $exitcode\"\n\n    if [ -n \"${_shournal_shell_exec_string+x}\" ]; then\n        if [ -n \"${_shournal_parent_launched_us_noninteractive+x}\" ]; then\n       
      _shournal_warn \"SHOURNAL_DISABLE called, but the fanotify-based\" \\\n                       \"shell integration cannot be disabled for the\" \\\n                       \"'$_SHOURNAL_SHELL_NAME -c' invocation. Please use the\" \\\n                       \"kernel backend if this is a strong requirement.\"\n            return 1\n        else\n            _shournal_debug \"_shournal_parent_launched_us_noninteractive is not\" \\\n                            \"set, likely disable was called without a\" \\\n                            \"prior enable.\"\n            return 0\n        fi\n    fi\n\n    if [ -z \"${_shournal_is_running+x}\" ]; then\n        _shournal_debug \"_shournal_disable: not running\"\n        return 0\n    fi\n\n    # In case we were called in a sequence, e.g.\n    # $ (exit 123); SHOURNAL_DISABLE\n    # we should cleanup first (otherwise the command was lost).\n    # Be careful to avoid endless shutdown recursion here\n    # ( _shournal_postexec may call SHOURNAL_DISABLE in case of errors)\n    if [ -z \"${_shournal_during_shutdown+x}\" ]; then\n        _shournal_during_shutdown=true\n        _shournal_postexec \"$exitcode\"\n    fi\n    _libshournal_disable || _shournal_warn \"_libshournal_disable failed\"\n    _shournal_remove_prompts\n\n    unset _shournal_during_shutdown\n    unset _shournal_is_running\n    return 0\n}\n\n\n# Check if it is _ok_ to call exec.\n# The shell is running interactive (no *_EXECUTION_STRING)\n# and _libshournal is not (pre-)loaded yet. To do so we\n# (currently) have to call exec. 
While doing so from\n# .shrc is fine, and exec'ing from an interactive shell is also ok (non-exported\n# variables are lost), we want to exclude the case where\n# commands are called in a row after SHOURNAL_ENABLE e.g.\n# $ SHOURNAL_ENABLE; important-command\n# because those are lost.\n_shournal_interactive_exec_allowed(){\n    local current_cmd\n    current_cmd=\"$(_shournal_print_current_cmd)\"\n    current_cmd=\"$(_shournal_trim \"$current_cmd\")\"\n    if [ -z \"$current_cmd\" ]; then\n        _shournal_debug \"_shournal_enable exec granted, history \" \\\n                        \"is empty (likely called from .shrc)\"\n        return 0\n    fi\n\n    if ! _shournal_endswith \"$current_cmd\" 'SHOURNAL_ENABLE'; then\n        _shournal_warn \"Command after SHOURNAL_ENABLE detected but\" \\\n                       \"we have to call exec first to enable the\" \\\n                       \"fanotify based shell integration.\" \\\n                       \"Please ENABLE as separate command\" \\\n                       \"or switch backend. 
Command was '$current_cmd'\"\n        return 1\n    fi\n    return 0\n}\n\n_shournal_set_verbosity(){\n    local ret=0\n    # for libshournal-shellwatch.so\n    export _SHOURNAL_LIB_SHELL_VERBOSITY=\"$1\"\n    if _libshournal_is_loaded; then\n        _libshournal_update_verbosity || ret=$?\n    fi\n    return $ret\n}\n\n_shournal_print_versions(){\n    echo \"shournal-run-fanotify: $(shournal-run-fanotify --version)\"\n    if _libshournal_is_loaded ; then\n        _libshournal_print_version || _shournal_warn \"printing version failed.\"\n    else\n        echo \"To see the version of shournal's shell-integration \" \\\n             \" (libshournal-shellwatch.so) please SHOURNAL_ENABLE first\"\n    fi\n    return 0\n}\n\n\n# If our libshournal-shellwatch.so is not loaded yet, do so\n# by \"relaunching\" this process (exec with same args)\n# within the \"original\" mount namespace.\n_shournal_exec_ldpreloaded_shell(){\n    declare -a args_array\n    local cmd_path\n    local IFS; unset IFS\n\n    if ! [ -f \"${SHOURNAL_PATH_LIB_SHELL_INTEGRATION-}\" ]; then\n        _shournal_error \"Please provide a valid path for libshournal-shellwatch.so, e.g. 
\" \\\n                        \"export SHOURNAL_PATH_LIB_SHELL_INTEGRATION=\" \\\n                        \"'/usr/local/lib/shournal/libshournal-shellwatch.so'\"\n        return 1\n    fi\n    cmd_path=\"$(readlink /proc/$$/exe)\"\n    while IFS= read -r -d '' line; do\n        args_array+=(\"$line\")\n    done < /proc/$$/cmdline\n    export _shournal_enable_just_called=true\n    export LD_PRELOAD=${LD_PRELOAD-}\":$SHOURNAL_PATH_LIB_SHELL_INTEGRATION\"\n    _shournal_debug \"_shournal_exec_ldpreloaded_shell: calling preloaded \" \\\n                    \"$cmd_path ${args_array[@]:1}\"\n    # Relaunch the shell with shournals .so preloaded using the original arguments.\n    exec shournal --verbosity \"$_SHOURNAL_VERBOSITY\"  \\\n        --backend-filename \"$_shournal_run_backend\" \\\n        --msenter-orig-mountspace \\\n        --exec-filename \"$cmd_path\" --exec -- \"${args_array[@]}\"\n    # only get here on error\n    return 1\n\n}\n\n# Handle the sh -c '...' case. No PS0 (bash) or preexec_functions (zsh)\n_shournal_handle_exec_string(){\n    local cmd_trimmed\n    local cmd_path\n    declare -a args_array\n    local IFS; unset IFS\n\n    # In *this* backend we simply re-exec ourselves\n    # with shournal and monitor the whole command sequence (SHOURNAL_DISABLE not\n    # possible). Note that technically it would be possible to\n    # move the original shell-binary somewhere else, execute a shournal-fake-shell\n    # instead and invoke the original shell preloaded, so\n    # we could allow for flexible enabling/disabling. This however would\n    # be somewhat involved and possibly require one-time setup by the user.\n    # On the other hand the ko-based shell integration does offer this\n    # flexibility, so here we just ensure correctness:\n    # We may only re-exec, if no command was executed before we were enabled,\n    # otherwise it would be executed twice! 
Therefore we allow only two\n    # cases: SHOURNAL_ENABLE as first command of the invocation e.g.\n    # *sh -c 'SHOURNAL_ENABLE; ...' or when called from .shrc (which\n    # must have been sourced before the command invocation starts.\n\n\n    if [ -n \"${_shournal_parent_launched_us_noninteractive+x}\" ] ; then\n        _shournal_debug \"_shournal_handle_exec_string: we are (likely) already observed,\" \\\n                        \"_shournal_parent_launched_us_noninteractive is set.\" \\\n                        \"NOT performing re-exec\"\n        return 0\n    fi\n\n    cmd_trimmed=\"$(_shournal_trim \"$_shournal_shell_exec_string\")\"\n    if ! _shournal_startswith \"$cmd_trimmed\" 'SHOURNAL_ENABLE' &&\n       ! _shournal_verbose_reexec_allowed; then\n        _shournal_warn \"we were enabled _during_ the $_SHOURNAL_SHELL_NAME -c\" \\\n                       \"invocation, however, the fanotify backend only supports\" \\\n                       \"enabling _before_ or at the beginning of the invocation.\" \\\n                       \"Either switch to the kernel backend or\" \\\n                       \"put SHOURNAL_ENABLE into your shell's rc or at the\" \\\n                       \"beginning of the invocation. It may also be\" \\\n                       \"possible to directly call the command with 'shournal -e ...'\"\n        return 1\n    fi\n\n    cmd_path=\"$(readlink /proc/$$/exe)\"\n    while IFS= read -r -d '' line; do\n        args_array+=(\"$line\")\n    done < /proc/$$/cmdline\n\n    _shournal_debug \"_shournal_handle_exec_string: exec non-interactive\" \\\n                    \"$cmd_path ${args_array[@]}\"\n\n    # arg --fork: do not wait writing to the database. 
Otherwise\n    # it blocks of course.\n    _shournal_parent_launched_us_noninteractive=true exec \\\n        shournal --backend-filename \"$_shournal_run_backend\" \\\n        --verbosity \"$_SHOURNAL_VERBOSITY\" \\\n        --exec-filename \"$cmd_path\" --exec --fork -- \"${args_array[@]}\"\n    # only get here on error\n    return 1\n}\n\n\n_shournal_preexec_generic(){\n    _libshournal_prepare_cmd || :\n}\n\n\n_shournal_postexec_generic(){\n    local cmd_str=\"$1\"\n    local exitcode=\"$2\"\n\n    _shournal_debug \"_shournal_postexec\"\n\n    # user might modify history settings at any time, so better be safe:\n    if ! _shournal_verbose_history_check; then\n        _shournal_warn \"history settings were modified after the shell \" \\\n                       \"integration was turned on. \" \\\n                       \"Turning the shell integration off...\"\n        # Be careful to avoid endless shutdown recursion here.\n        if [ -z \"${_shournal_during_shutdown+x}\" ]; then\n            _shournal_during_shutdown=true\n            SHOURNAL_DISABLE\n        fi\n        return 1\n    fi\n    export _SHOURNAL_LAST_COMMAND=\"$cmd_str\"\n    export _SHOURNAL_LAST_RETURN_VALUE=$exitcode\n\n    # cleanup may fail regularily, this function may also be executed\n    # when no command ran before (e.g. 
when hitting enter on a blank line in bash)\n    _libshournal_cleanup_cmd || :\n\n    unset _SHOURNAL_LAST_COMMAND\n    unset _SHOURNAL_LAST_RETURN_VALUE\n\n    return 0\n}\n\n# «Send» messages to libshournal-shellwatch.so by exporting a\n# variable and triggering a dummy-close event.\n_shournal_trigger_update(){\n    local desired_state=\"$1\"\n    local trigger_response\n    local ret\n\n    export _LIBSHOURNAL_TRIGGER=\"$desired_state\";\n    export _SHOURNAL_SHELL_PID=$$\n\n    # Note: our .so detects this special (non-)filename and\n    # writes its response to an unnamed tmp file.\n    ret=0\n    read -d '' trigger_response < '_///shournal_trigger_response///_' || ret=$?\n    unset _LIBSHOURNAL_TRIGGER\n    unset _SHOURNAL_SHELL_PID\n\n    if [ $ret -ne 0 ]; then\n        _shournal_debug \"_shournal_trigger_update: failed to read \" \\\n                        \"trigger_response\"\n        return 1\n    fi\n    [ \"$trigger_response\" = ok ] && return 0 || return 1\n}\n\n_libshournal_enable(){\n    _shournal_trigger_update 0\n}\n\n_libshournal_disable(){\n    _shournal_trigger_update 1\n}\n\n_libshournal_prepare_cmd(){\n    _shournal_trigger_update 2\n}\n\n_libshournal_cleanup_cmd(){\n    _shournal_trigger_update 3\n}\n\n_libshournal_print_version(){\n    _shournal_trigger_update 4\n}\n\n_libshournal_update_verbosity(){\n    _shournal_trigger_update 5\n}\n\n_libshournal_is_loaded(){\n    local word_arr\n    local pathname\n    local IFS; unset IFS\n\n    if [ -n \"${ZSH_VERSION+x}\" ]; then\n        setopt LOCAL_OPTIONS\n        setopt sh_word_split\n    fi\n\n    while IFS=\"\" read -r row || [ -n \"$row\" ]; do\n        word_arr=($row)\n        # see man 5 proc section /proc/[pid]/maps\n        # word_arr[5] usually contains the pathname but may also be blank.\n        [ \"${#word_arr[@]}\" -lt 6 ] && continue\n        # portable array index access (zsh is one-based)\n        pathname=\"${word_arr[@]:5:1}\"\n        _shournal_endswith \"$pathname\" 
libshournal-shellwatch.so && return 0\n    done < \"/proc/$$/maps\"\n    return 1\n}\n\n\n\n# The following non-portable, shell specific functions _must_ be set\n# for each supported shell:\n# _shournal_add_prompts\n# _shournal_remove_prompts\n# _shournal_postexec\n# _shournal_verbose_reexec_allowed\ncase \"$_SHOURNAL_SHELL_NAME\" in\n  'bash')\nexport _LIBSHOURNAL_SEQ_COUNTER=1\n_shournal_ps0='${_SHOURNAL_SHELL_NAME:((_LIBSHOURNAL_SEQ_COUNTER++)):0}$(:)'\n\n_shournal_add_prompts(){\n    [ -z \"${PS0+x}\" ] && PS0=''\n    [ -z \"${PROMPT_COMMAND+x}\" ] && PROMPT_COMMAND=''\n\n    # Allright, what happens here? We use _SHOURNAL_SHELL_NAME as a dummy\n    # variable in order to increment _LIBSHOURNAL_SEQ_COUNTER without printing\n    # anything. Then we fork to notify libshournal-shellwatch.so that\n    # we're about to execute a command.\n    PS0=\"$PS0\"\"$_shournal_ps0\"\n    PROMPT_COMMAND=$'_shournal_postexec\\n'\"$PROMPT_COMMAND\"\n    # no _shournal_preexec for bash, see below ...\n    return 0\n}\n\n_shournal_remove_prompts(){\n    [ -n \"${PS0+x}\" ] && PS0=${PS0//\"$_shournal_ps0\"/}\n    [ -n \"${PROMPT_COMMAND+x}\" ] &&\n        PROMPT_COMMAND=${PROMPT_COMMAND//_shournal_postexec$'\\n'/}\n    return 0\n}\n\n## _____ End of must-override functions and variables _____ ##\n\n\n# _shournal_preexec(){\n#   For bash preexec is not implemented here but in\n#   in an interplay of above PS0 and libshournal-shellwatch.so.\n# }\n\n_shournal_postexec(){\n    local exitcode=$?\n    local cmd_str\n    [ -n \"${1+x}\" ] && exitcode=\"$1\"\n    _shournal_get_current_cmd_bash cmd_str\n    _shournal_postexec_generic \"$cmd_str\" \"$exitcode\"\n    return $exitcode\n}\n\n\n;; # END_OF bash _______________________________________________________\n\n'zsh')\n\n_shournal_add_prompts(){\n    preexec_functions+=(_shournal_preexec)\n    precmd_functions+=(_shournal_postexec)\n    return 0\n}\n\n_shournal_remove_prompts(){\n    unset _shournal_zsh_last_cmd\n\n    
preexec_functions[$preexec_functions[(i)_shournal_preexec]]=()\n    precmd_functions[$precmd_functions[(i)_shournal_postexec]]=()\n    return 0\n}\n\n## _____ End of must-override functions and variables _____ ##\n\n\n_shournal_preexec(){\n    # maybe_todo: use $2 or $3 for expanded aliases instead of $1\n    _shournal_zsh_last_cmd=\"$1\"\n    _shournal_preexec_generic\n\n    return 0\n}\n\n_shournal_postexec(){\n    local exitcode=$?\n    [ -n \"${1+x}\" ] && exitcode=\"$1\"\n    _shournal_postexec_generic \"$_shournal_zsh_last_cmd\" $exitcode\n    return 0\n}\n\n\n\n\n;; # END_OF zsh ________________________________________________________\n  *)\n    echo \"shournal shell integration: something is seriously wrong, \" \\\n        \"_SHOURNAL_SHELL_NAME is not correctly setup\" >&2\n    return 1\n;;\nesac\n\n\nif [ -n \"${_shournal_enable_just_called+x}\" ] ; then\n    # A parent process has called SHOURNAL_ENABLE and exec'd itself\n    # again with the same arguments and our libshournal-shellwatch.so\n    # preloaded. Let the tracking begin ...\n    if ! _libshournal_is_loaded ; then\n        _shournal_error \"Although _'shournal_enable_just_called' is set, \" \\\n                        \"libshournal-shellwatch.so seems \" \\\n                        \"to be not loaded (bug?).\"\n        unset _shournal_enable_just_called\n        return 1\n    fi\n    _shournal_enable\nfi\n\n\n\n"
  },
  {
    "path": "shell-integration-scripts/integration_ko.sh",
    "content": "\n# shell-integration for shournal - kernel-module backend.\n\n\n_shournal_run_backend='shournal-run'\n\n_shournal_enable(){\n    local ret=0\n    [ -n \"${_shournal_int_traps+x}\" ] || _shournal_int_traps=()\n    _shournal_trap_push '' INT || return\n    _shournal_do_enable || ret=$?\n    _shournal_trap_pop || :\n    return $ret\n}\n\n_shournal_do_enable(){\n    local tmpdir\n    local ret=0\n    local cmd_str\n\n    if [ -n \"${_shournal_is_running+x}\" ] ; then\n        # maybe_todo: check that our prompts are still there.\n        _shournal_debug \"_shournal_enable: current session is already observed\"\n        return 0\n    fi\n\n    if ! \"$_shournal_run_backend\" --shournalk-is-loaded; then\n        _shournal_warn \"Cannot enable the shell-integration -\" \\\n                       \"the required kernel module is not loaded.\"\n        return 1\n    fi\n\n    if [ -e '/dev/shm' ]; then\n        tmpdir='/dev/shm'\n    else\n        [ -n \"${TMPDIR+x}\" ] && tmpdir=\"$TMPDIR\" || tmpdir=/tmp\n    fi\n\n    _shournal_fifo_basepath=\"$tmpdir/shournal-fifo-$USER-$$\"\n\n    # If an observed shell calls \"exec bash\" we end\n    # up with an already existing fifo.\n    # In almost all cases this is no problem, as the first time shournal-run is called\n    # the pid is claimed and the old shournal-run process exits. 
However, in case of\n    # sequence count 1 the previous and current fifo-paths collide, so just clean up\n    # in any case.\n    _shournal_detach_this_pid 0\n\n    if [ -n \"${_shournal_shell_exec_string+x}\" ]; then\n        # invoked via sh -c\n        cmd_str=\"\"\n        # FIXME: also collect /proc/$$/exe ?\n        while IFS= read -r -d '' line; do\n            [ -z \"$cmd_str\" ] && cmd_str=\"$line\" ||\n                                 cmd_str=\"$cmd_str $line\"\n        done < /proc/$$/cmdline\n\n        _shournal_preexec_generic 1 \"$cmd_str\" || return $?\n        _shournal_is_running=true\n    else\n        SHOURNAL_SESSION_ID=\"$(shournal-run --make-session-uuid)\" || return $?\n        export SHOURNAL_SESSION_ID\n        export SHOURNAL_CMD_COUNTER=0\n\n        # Usually removing prompts should not be necessary here,\n        # however, if a user exports PS0/PROMPT_COMMAND\n        # and starts a new bash-session, we need to get rid of the existing commands.\n        _shournal_remove_prompts || return $?\n        _shournal_add_prompts || return $?\n        _shournal_is_running=true\n    fi\n    return 0\n}\n\n_shournal_disable(){\n    # Note that there are at least three cases how we can get here:\n    # • User-invoked SHOURNAL_DISABLE\n    # • Error during pre/postexec\n    # • exit trap\n    local exitcode=\"$1\"\n\n    _shournal_debug \"_shournal_disable: about to disable\" \\\n                    \"with exitcode $exitcode\"\n\n    if [ -z \"${_shournal_is_running+x}\" ]; then\n        _shournal_debug \"_shournal_disable: not running\"\n        return 0\n    fi\n\n    _shournal_trap_push '' INT || :\n\n    _shournal_remove_prompts\n    _shournal_detach_this_pid \"$exitcode\"\n    # Don't unset _shournal_int_traps here - we may have been called nested!\n    unset _shournal_is_running _shournal_preexec_ret \\\n          _shournal_fifo_basepath\n\n    _shournal_trap_pop || :\n    return 0\n}\n\n_shournal_set_verbosity(){\n    
:\n}\n\n_shournal_print_versions(){\n    echo \"shournal-run: $(shournal-run --version)\"\n}\n\n\n_shournal_send_msg(){\n    # send json string to last started shournal.\n    # for the different message types (msgType), see enum FIFO_MSG in c++.\n    local fifofd=\"$1\"\n    local msg_type=\"$2\"\n    local msg_data=\"$3\"\n    local ret=0\n\n    # simple json string type-data: { \"msgType\":0, \"data\":\"stuff\" }\n    local full_msg=\"{\\\"msgType\\\":$msg_type,\\\"data\\\":\\\"$msg_data\\\"}\"\n    _shournal_debug \"_shournal_send_msg: sending message: $full_msg\"\n\n    echo \"$full_msg\" >&$fifofd || ret=$?\n    if [ $ret -ne 0 ]; then\n        _shournal_error \"_shournal_send_msg: failed to write to fifo-FD $fifofd: $ret\"\n        return $ret\n    fi\n    return 0\n}\n\n_shournal_send_ret_val(){\n    _shournal_send_msg \"$1\" 0 \"$2\"\n}\n\n\n_shournal_send_unmark_pid(){\n    _shournal_send_msg \"$1\" 1 \"$2\"\n}\n\n_shournal_run_finalize(){\n    local fifopath=\"$1\"\n    local exitcode=\"$2\"\n    local _shournal_fifofd\n\n    # Open the FIFO RDWR to be protected against deadlocks which may occur, e.g.,\n    # if shournal dies after having set up the FIFO. Note that shournal will ignore\n    # this event, because a FIFO is not a regular file.\n    if ! { exec {_shournal_fifofd}<>\"$fifopath\"; } 2>/dev/null; then\n        _shournal_debug \"_shournal_run_finalize: opening fifopath \\\"$fifopath\\\" failed.\"\n        return 0\n    fi\n    _shournal_send_ret_val $_shournal_fifofd $exitcode\n    _shournal_send_unmark_pid $_shournal_fifofd $$\n    exec {_shournal_fifofd}<&-\n\n    # If everything goes well, this rm is not needed, as shournal performs it for us.\n    # However, if shournal died in the background, we have created the now REGULAR file at\n    # $fifopath ourselves. 
So KISS and delete always.\n    rm \"$fifopath\" 2>/dev/null\n}\n\n\n# Find a fifo (if any) that was created by this shell previously and\n# instruct the corresponding shournal-run process to stop\n# observing this pid.\n_shournal_detach_this_pid(){\n    local exitcode=\"$1\"\n    local fifopath\n    local ret=0\n\n    if [ \"$_SHOURNAL_SHELL_NAME\" = 'zsh' ]; then\n        # suppress nomatch error messages (and aborts)\n        setopt LOCAL_OPTIONS\n        unsetopt nomatch\n    fi\n\n    # use globbing to ignore the sequence number\n    set -- \"$_shournal_fifo_basepath\"*\n    # Note: in case of no results $1 is _not_ empty, so check\n    # for existence.\n    if [ -e \"$1\" ] ; then\n        if [ $# -eq 1 ]; then\n            fifopath=${1%%$'\\n'*} # should not be necessary\n            _shournal_debug \"_shournal_detach_this_pid at $fifopath\"\n            _shournal_run_finalize \"$fifopath\" \"$exitcode\" || ret=$?\n        else\n            _shournal_error \"_shournal_detach_this_pid: unexpected fifos $@\"\n            ret=1\n        fi\n    fi\n    return $ret\n}\n\n# preexec is run before a valid command (but not when ENTER or Ctrl+C is hit).\n# We launch a shournal-run process and wait for it to setup and\n# fork into background.\n_shournal_preexec_generic(){\n    local current_seq=\"$1\"\n    local cmd_str=\"$2\"\n    local fifopath\n    local args_array\n\n    if ! _shournal_verbose_history_check; then\n        _shournal_warn \"history settings were modified after the shell integration was turned on. 
\" \\\n                       \"Please correct that or call SHOURNAL_DISABLE \" \\\n                       \"to get rid of this message.\"\n        return 1\n    fi\n    fifopath=\"$_shournal_fifo_basepath-$current_seq\"\n    _shournal_debug \"_shournal_preexec_generic: using fifo at $fifopath\"\n    _shournal_warn_on \"[ -e \\\"$fifopath\\\" ]\"\n\n    args_array=(\n        --verbosity \"$_SHOURNAL_VERBOSITY\" --pid $$ --fork\n        --close-fds --fifoname \"$fifopath\"\n        --cmd-string \"$cmd_str\"\n    )\n\n    [ -n \"${SHOURNAL_SESSION_ID+x}\" ] &&\n        args_array+=(--shell-session-uuid \"$SHOURNAL_SESSION_ID\")\n\n    # Argument --close-fds is important here for the following reasons:\n    # * We may run within a subshell which waits for redirected stdout to\n    #   close (deadlock otherwise).\n    # * We have created a custom redirection, e.g. with\n    #   exec 3> foo; echo \"test\" >&3;\n    #   exec 3>&-; # closes 3\n    #   In this case **without closing** within shournal-run the close event would be lost,\n    #   as the final __fput is reached during shournal-run exit().\n    # Argument --fork: shournal forks itself into background once setup\n    # is ready, so we can just wait here.\n    if ! shournal-run \"${args_array[@]}\"; then\n        # only debug here - there should already be two warnings - one from\n        # shournal-run or bash not able to execute and (likely) one afterwards\n        # from the prompt.\n        _shournal_debug \"_shournal_preexec_generic: shournal-run setup failed\"\n        return 1\n    fi\n    return 0\n}\n\n\n# postexec is run after any command, but eventually also after hitting ENTER\n# or Ctrl (other than PS0). 
However, the command sequence counter\n# SHOURNAL_CMD_COUNTER is only incremented in case of valid commands.\n# To avoid duplicate cleanups, we look at the return value set in PS0.\n# * If it's unset, no preexec has run yet.\n# * if it's -1, preexec was possibly run, but aborted in between\n_shournal_postexec_generic(){\n    local current_seq=\"$1\"\n    local exitcode=\"$2\"\n    local fifopath\n    local die=false\n\n    if [ -z \"${_shournal_preexec_ret+x}\" ]; then\n        _shournal_debug \"_shournal_postexec_generic: no preexec run yet \"\n        return 0\n    fi\n\n    case \"$_shournal_preexec_ret\" in\n        0) : ;;\n        '')\n            _shournal_debug \"_shournal_postexec_generic: already cleaned up\"\n            return 0\n        ;;\n        -1|130)\n            _shournal_debug \"_shournal_postexec_generic: _shournal_preexec_ret is\" \\\n                            \"$_shournal_preexec_ret. This was likely caused by Ctrl+C (SIGINT).\"\n        ;;\n        *)\n            _shournal_debug \"_shournal_postexec_generic: about to die due to\" \\\n                            \"_shournal_preexec_ret of $_shournal_preexec_ret\"\n            die=true\n        ;;\n    esac\n\n    fifopath=\"$_shournal_fifo_basepath-$current_seq\"\n    _shournal_debug \"_shournal_postexec_generic: using fifo at $fifopath\"\n\n    _shournal_trap_push '' INT || :\n\n    _shournal_run_finalize \"$fifopath\" \"$exitcode\"\n    _shournal_preexec_ret=''\n    if [ \"$die\" = true ] ; then\n        _shournal_warn \"Disabling the shell-integration due to previous setup-erros...\"\n        SHOURNAL_DISABLE\n    fi\n    _shournal_trap_pop || :\n\n    return $exitcode\n}\n\n\n# The following non-portable, shell specific functions _must_ be set\n# for each supported shell:\n# _shournal_add_prompts\n# _shournal_remove_prompts\n#\n# During the prompts _shournal_preexec_generic and\n# _shournal_postexec_generic must be\n# called respectively.\n\ncase \"$_SHOURNAL_SHELL_NAME\" in\n  
'bash')\n# We use _SHOURNAL_SHELL_NAME as a dummy variable in order to increment\n# SHOURNAL_CMD_COUNTER without printing anything in PS0. First increment,\n# then execute shournal. Otherwise a SIGINT may abort PS0 execution,\n# preventing the increment.\n_shournal_ps0='${_SHOURNAL_SHELL_NAME:((_shournal_preexec_ret=-1)):0}'\\\n'${_SHOURNAL_SHELL_NAME:((++SHOURNAL_CMD_COUNTER)):0}'\\\n'$(_shournal_preexec $SHOURNAL_CMD_COUNTER)'\\\n'${_SHOURNAL_SHELL_NAME:((_shournal_preexec_ret=$?)):0}'\n_shournal_prompt_command=$'_shournal_postexec\\n'\n\n\n_shournal_add_prompts(){\n    [ -z \"${PS0+x}\" ] && PS0=''\n    [ -z \"${PROMPT_COMMAND+x}\" ] && PROMPT_COMMAND=''\n    PS0+=\"$_shournal_ps0\"\n    PROMPT_COMMAND=\"${_shournal_prompt_command}${PROMPT_COMMAND}\"\n    return 0\n}\n\n_shournal_remove_prompts(){\n    [ -n \"${PS0+x}\" ] && PS0=${PS0//\"$_shournal_ps0\"/}\n    [ -n \"${PROMPT_COMMAND+x}\" ] &&\n        PROMPT_COMMAND=${PROMPT_COMMAND//\"$_shournal_prompt_command\"/}\n    return 0\n}\n## _____ End of must-override functions and variables _____ ##\n\n\n_shournal_preexec(){\n    local current_seq=\"$1\"\n    local cmd_str\n\n    if [[ -z \"${PROMPT_COMMAND+x}\" || \"$PROMPT_COMMAND\" != *\"$_shournal_prompt_command\"* ]]; then\n        _shournal_error \"_shournal_preexec: Invalid PROMPT_COMMAND. Apparently\" \\\n            \"PROMPT_COMMAND was modified after SHOURNAL_ENABLE\" \\\n            \"was called. This is often caused by double-sourcing the bashrc, e.g. from\" \\\n            \"~/.profile or .bash_profile.\"\n        return 1\n    fi\n\n    _shournal_get_current_cmd_bash cmd_str\n    _shournal_preexec_generic \"$current_seq\" \"$cmd_str\"\n}\n\n# Disable the shell-integration in case of setup-errors, to avoid\n# spamming the user. 
Setup may in particular fail in cases where\n# shournal is updated while the kernel module of the old version\n# is still active.\n# Note that other than _shournal_preexec this\n# function is executed in the *parent shell*.\n_shournal_postexec(){\n    local ret=$?\n    _shournal_postexec_generic \"$SHOURNAL_CMD_COUNTER\" \"$ret\" || :\n    return $ret\n}\n\n\n;; # END_OF bash _______________________________________________________\n\n'zsh')\n\n\n_shournal_add_prompts(){\n    preexec_functions+=(_shournal_preexec)\n    precmd_functions+=(_shournal_postexec)\n    return 0\n}\n\n_shournal_remove_prompts(){\n    preexec_functions[$preexec_functions[(i)_shournal_preexec]]=()\n    precmd_functions[$precmd_functions[(i)_shournal_postexec]]=()\n    return 0\n}\n\n## _____ End of must-override functions and variables _____ ##\n\n\n_shournal_preexec(){\n    # maybe_todo: use $2 or $3 for expanded aliases instead of $1\n    local cmd_str=\"$1\"\n    local ret=0\n\n    _shournal_preexec_ret=-1\n    ((++SHOURNAL_CMD_COUNTER))\n    _shournal_preexec_generic $SHOURNAL_CMD_COUNTER \"$cmd_str\" || ret=$?\n    _shournal_preexec_ret=$ret\n    return $ret\n}\n\n_shournal_postexec(){\n    local exitcode=$?\n    _shournal_postexec_generic $SHOURNAL_CMD_COUNTER $exitcode || return $?\n    return 0\n}\n\n\n;; # END_OF zsh ________________________________________________________\n  *)\n    echo \"shournal shell integration: sourced from unsupported shell - \" \\\n         \"currently only bash and zsh are supported.\" >&2\n    return 1\n;;\nesac\n\n\n\n\n"
  },
  {
    "path": "shell-integration-scripts/integration_main.sh.in",
    "content": "\n# Shell integration for shournal\n# This file contains all public functions and\n# must be compatible with all supported shells.\n\nSHOURNAL_ENABLE(){\n    local cmd_path args_array line ret=0\n    local clusterjob_reexec_counter\n\n    if _shournal_is_subshell; then\n        _shournal_warn \"shournal's shell integration must not be enabled from\" \\\n                       \"within a subshell\"\n        return 1\n    fi\n\n    if [ -z \"$(command -v shournal)\" ] ; then\n        _shournal_error \"cannot enable shournal's shell integration - command «shournal» not found\"\n        return 1\n    fi\n\n    if [ -z \"$(command -v \"$_shournal_run_backend\")\" ] ; then\n        _shournal_error \"cannot enable shournal's shell integration - \" \\\n                        \"command «$_shournal_run_backend» not found\"\n        return 1\n    fi\n\n    if ! shournal --validate-settings; then\n        # informative mesg. should have been already printed by shournal\n        _shournal_error \"shell integration is _not_ enabled\"\n        return 1\n    fi\n\n    if [ -n \"${_SHOURNAL_CLUSTERJOB_JUST_REEXECUTED+x}\" ]; then\n        _shournal_debug \"shournal just re-executed this clusterjob.\" \\\n                        \"SHOURNAL_ENABLE is ignored this time.\"\n        unset _SHOURNAL_CLUSTERJOB_JUST_REEXECUTED\n        return 0\n    fi\n\n    if _shournal_clusterjob_reexec_ok; then\n        # Do not use BASH_EXECUTION_STRING, it is not set in all cluster jobs.\n        args_array=()\n        while IFS= read -r -d '' line; do\n            args_array+=(\"$line\")\n        done < /proc/$$/cmdline\n        if [ ${#args_array[@]} -lt 2 ]; then\n            _shournal_warn \"SHOURNAL_ENABLE: we detected ${args_array[*]} as\" \\\n                           \"cluster job without arguments. 
Please report.\"\n        else\n            cmd_path=\"$(readlink /proc/$$/exe)\"\n            _shournal_debug \"running cluster job: $cmd_path ${args_array[*]}\"\n            clusterjob_reexec_counter=${_SHOURNAL_CLUSTERJOB_REEXEC_COUNTER:-0}\n            [ $clusterjob_reexec_counter -gt 0 ] &&\n                _shournal_debug \"unusual clusterjob_reexec_counter of $clusterjob_reexec_counter\"\n            ((++clusterjob_reexec_counter))\n            _SHOURNAL_CLUSTERJOB_REEXEC_COUNTER=\"$clusterjob_reexec_counter\" \\\n                _SHOURNAL_CLUSTERJOB_JUST_REEXECUTED=true exec \\\n                shournal --backend-filename \"$_shournal_run_backend\" \\\n                --verbosity \"$_SHOURNAL_VERBOSITY\" \\\n                --exec-filename \"$cmd_path\" --exec -- \"${args_array[@]}\"\n            # only get here on error\n            return 1\n        fi\n    fi\n\n    if [ -z \"${_shournal_shell_exec_string+x}\" ]; then\n         # Running somewhat \"interactively\"\n        _shournal_verbose_history_check || return $?\n    fi\n\n    _shournal_enable || ret=$?\n    return $ret\n}\n\n\nSHOURNAL_DISABLE() {\n    # In case we were called in a sequence, e.g.\n    # $ (exit 123); SHOURNAL_DISABLE\n    # capture the exit code here\n    local exitcode=$?\n    local ret=0\n\n    if _shournal_is_subshell; then\n        _shournal_warn \"shournal's shell integration must not be disabled from\" \\\n                       \"within a subshell\"\n        return 1\n    fi\n\n    if _shournal_is_clusterjob && [ -n \"${_SHOURNAL_CLUSTERJOB_REEXEC_COUNTER+x}\" ]; then\n        _shournal_warn \"shournal seems to trace this process as a cluster job\" \\\n                       \"and cannot be disabled in that mode.\"\n        return 1\n    fi\n\n    _shournal_disable $exitcode || ret=$?\n    if [ $ret -eq 0 ]; then\n        unset _shournal_current_pid SHOURNAL_SESSION_ID SHOURNAL_CMD_COUNTER\n    fi\n    return $ret\n}\n\n\n# $1: pass one of dbg, info, warning, 
critical\nSHOURNAL_SET_VERBOSITY(){\n    case \"$1\" in\n    \"dbg\")\n        _shournal_shell_integration_log_level=0\n        ;;\n    \"info\")\n        _shournal_shell_integration_log_level=1\n        ;;\n    \"warning\")\n        _shournal_shell_integration_log_level=2\n        ;;\n    \"critical\")\n        _shournal_shell_integration_log_level=3\n        ;;\n    *)\n        _shournal_warn \"Bad verbosity passed. Pass one of dbg, info, warning, critical\"\n        return 1\n        ;;\n    esac\n\n    # verbosity for shournal-run*\n    export _SHOURNAL_VERBOSITY=\"$1\"\n    _shournal_set_verbosity \"$1\"\n}\n\nSHOURNAL_PRINT_VERSIONS() {\n    echo \"shournal $_SHOURNAL_SHELL_NAME integration version: $_shournal_version\"\n    echo \"shournal: $(shournal --version)\"\n    _shournal_print_versions\n}\n\n\n# _________ End of public interface _________ #\n\n\n# BEGIN_SECTION auto-filled by cmake\n_shournal_version=\"@shournal_version@\"\n\n# -z: Allow to be overwritten\n[ -z \"${SHOURNAL_PATH_LIB_SHELL_INTEGRATION+x}\" ] &&\n  export SHOURNAL_PATH_LIB_SHELL_INTEGRATION=\"@full_path_libshournal@\"\n\n# END_SECTION auto-filled by cmake\n\n\n# We have to set several global variables here and _not_ in SHOURNAL_ENABLE\n# for the libshournal-shellwatch.so LD_PRELOAD-hack. If it is not\n# not loaded, on SHOURNAL_ENABLE we exec the current shell again and\n# perform the actual initialization. 
Therefore, *this*\n# script must be sourced within the shell's rc but we don't want to\n# require SHOURNAL_ENABLE within the rc-file.\n\n# 0: debug, 1: info, 2: warning, 3: error\n[ -z \"${_shournal_shell_integration_log_level+x}\" ] &&\n    _shournal_shell_integration_log_level=2\n# verbosity for shournal-run* ( _must_ be exported )\n[ -z \"${_SHOURNAL_VERBOSITY+x}\" ] &&\n    export _SHOURNAL_VERBOSITY=\"warning\"\n# verbosity for libshournal-shellwatch.so ( _must_ be exported )\n[ -z \"${_SHOURNAL_LIB_SHELL_VERBOSITY+x}\" ] &&\n    export _SHOURNAL_LIB_SHELL_VERBOSITY=\"warning\"\n\n\n# Setup non-portable stuff for\n# each supported shell. The following variables _must_ be set:\n# _SHOURNAL_SHELL_NAME  (name of the current shell). It must be exported\n# for libshournal-shellwatch.so.\n# _shournal_shell_exec_string - if and only if the command is executed non-interactively\n# For zsh aliases: avoid error on double source\nunalias _shournal_trap_set &>/dev/null || :\nif [ -n \"${BASH_VERSION+x}\" ]; then\nexport _SHOURNAL_SHELL_NAME='bash'\n\n# The bash execution string is e.g. set when running bash -c 'echo foo', in which case we never get to\n# any prompt. Simply execute the whole command\n# within shournal.\n# Checking $BASH_EXECUTION_STRING seems to be more reliable than [[ $- == *i* ]], because\n# of commands like e.g. 
bash -i -c 'echo \"wtf - is that interactive?\"'\n[ -n \"${BASH_EXECUTION_STRING+x}\" ] &&\n    _shournal_shell_exec_string=\"$BASH_EXECUTION_STRING\"\n\n_shournal_trap_push(){\n    local trap_tmp\n    # First save to temporary variable, as we are not interrupt-safe yet\n    trap_tmp=\"$(trap -p INT)\"\n    trap \"$@\"\n    _shournal_int_traps+=(\"$trap_tmp\")\n    return 0\n}\n\n_shournal_trap_pop(){\n    local old_trap\n    if [ ${#_shournal_int_traps[@]} -eq 0 ]; then\n        _shournal_error \"_shournal_trap_pop - no int trap set\" >&2\n        return 1\n    fi\n    old_trap=\"${_shournal_int_traps[-1]}\"\n    # first unset: if we reset to default trap and user hits Ctrl+C, we would\n    # leak the array element\n    unset _shournal_int_traps[-1]\n    if [ -z \"$old_trap\" ]; then\n        trap - INT\n    else\n        eval \"$old_trap\"\n    fi\n    return 0\n}\n\nelif [ -n \"${ZSH_VERSION+x}\" ]; then\n\nexport _SHOURNAL_SHELL_NAME='zsh'\n\n[ -n \"${ZSH_EXECUTION_STRING+x}\" ] &&\n    _shournal_shell_exec_string=\"$ZSH_EXECUTION_STRING\"\n\n# This has to be at the top, so aliases are expanded in the other functions (files\n# are appended to this script)\nsetopt aliases\nalias _shournal_trap_push='setopt localtraps; trap'\n_shournal_trap_pop(){ :; }\n\nelse\n    echo \"shournal shell integration: sourced from unsupported shell - \" \\\n         \"currently only bash and zsh are supported.\" >&2\n    return 1\nfi\n"
  },
  {
    "path": "shell-integration-scripts/util.sh",
    "content": "\n\n# don't call it directly, but use one of debug, info, warning, error functions\n# $1: loglevel.\n# all other args: is printed to stderr\n_shournal_log_msg(){\n    local loglevel=$1\n    shift\n    [ \"$loglevel\" -ge \"$_shournal_shell_integration_log_level\" ] &&\n        >&2  printf \"shournal $_SHOURNAL_SHELL_NAME integration - $*\\n\"\n}\n\n_shournal_error() {\n     _shournal_log_msg 3 \"ERROR: $*\"\n}\n\n_shournal_warn(){\n    _shournal_log_msg 2 \"warning: $*\"\n}\n\n_shournal_info(){\n        _shournal_log_msg 1 \"info: $*\"\n}\n\n_shournal_debug(){\n        _shournal_log_msg 0 \"debug: $*\"\n}\n\n_shournal_warn_on(){\n    eval \"$1\" && _shournal_warn \"$1\"\n}\n\n\n# returns true, if $1 starts with $2\n_shournal_startswith() {\n    case $1 in\n        \"$2\"*) return 0;;\n        *) return 1;;\n    esac;\n}\n\n# returns true, if $1 ends with $2\n_shournal_endswith() {\n    case $1 in\n        *\"$2\") return 0;;\n        *) return 1;;\n    esac;\n}\n\n# returns true, if $1 contains $2\n_shournal_contains() {\n    case $1 in\n        *\"$2\"*) return 0;;\n        *) return 1;;\n    esac;\n}\n\n# Trim leading and trailing spaces\n_shournal_trim(){\n    echo -e \"${1}\" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//'\n}\n\n_shournal_is_clusterjob(){\n    # Recent versions of the sun grid engine (SGE) use cgroups to manage\n    # processes of a job. Once the main job-script finishes, all leftovers\n    # are killed. Therefore, shournal cannot flush the events to the\n    # database in background afterwards, so we fall back to foreground execution.\n    # Usually SHOURNAL_ENABLE is expected to be part of the shell's rc,\n    # e.g. 
~/.bashrc, which we require for a \"safe\" re-execution.\n    # A cluster-job should be: non-interactive and running within a login-shell.\n    [ -n \"${SHOURNAL_IS_CLUSTERJOB+x}\" ] && return 0\n    [ -z \"${SGE_O_WORKDIR+x}\"  -o  -z \"${JOB_NAME+x}\"  -o  -t 0  -o \\\n      -t 1  -o  -t 2  -o  -n \"${SHOURNAL_NO_CLUSTER_JOB_DETECT+x}\" ] && return 1\n    _shournal_sh_is_interactive && return 1\n    return 0\n}\n\n_shournal_clusterjob_reexec_ok(){\n    # s. _shournal_is_clusterjob\n    _shournal_is_clusterjob || return $?\n    if ! _shournal_verbose_reexec_allowed; then\n        _shournal_warn \"cluster job detected, but we are not allowed to re-exec.\" \\\n                       \"Please check your environment.\"\n        return 1\n    fi\n    return 0\n}\n\n_shournal_is_subshell(){\n    _shournal_refresh_current_pid || return $?\n    [ $_shournal_current_pid -ne $$ ] && return 0\n    return 1\n}\n\n_shournal_refresh_current_pid(){\n    local pid ret=0\n    read -d ' ' pid < /proc/self/stat || ret=$?\n    if [ $ret -ne 0 ]; then\n        _shournal_error \"_shournal_refresh_current_pid:\" \\\n            \"failed to read from /proc/self/stat: $ret\"\n        return $ret\n    fi\n    _shournal_current_pid=$pid\n    return 0\n}\n\n# Non-portable, shell specific functions:\ncase \"$_SHOURNAL_SHELL_NAME\" in\n  'bash')\n\n# returns 0, if all history settings were ok, else false.\n# is verbose, if a setting is not ok.\n_shournal_verbose_history_check(){\n    # no history needed if running non-interactively\n    [ -n \"${BASH_EXECUTION_STRING+x}\" ] && return 0\n    local ret=0\n\n    if ! [ -o history ]; then\n        ret=1\n        _shournal_error \"bash history is off. Please enable it: set -o history\"\n    fi\n\n    if [[ ${HISTSIZE-0} -lt 2 ]]; then\n        ret=1\n        _shournal_error \"bash HISTSIZE is too small (or not set). 
Please set it at least to 2: HISTSIZE=2\"\n    fi\n\n    if [[ ${HISTCONTROL-} == *\"ignorespace\"* || ${HISTCONTROL-} == *\"ignoreboth\"* ]]; then\n        ret=1\n        _shournal_error \"Commands with spaces are set to be ignored from history. Please disable that, \" \\\n                       \"e.g. HISTCONTROL=ignoredups or HISTCONTROL=''\"\n    fi\n\n    if [[ -n ${HISTIGNORE-} ]] ; then\n        ret=1\n        _shournal_error \"HISTIGNORE is not empty. Please unset it: unset HISTIGNORE\"\n    fi\n    return $ret\n}\n\n_shournal_get_current_cmd_bash(){\n    declare -n ret=$1\n    local cmd\n    # history output is e.g.\n    # \" 6989  echo foo\"\n    # so strip the leading \" 6989  \"\n    cmd=\"$(HISTTIMEFORMAT='' history 1)\"\n    [[ \"$cmd\" =~ ([[:space:]]*[0-9]+[[:space:]]*)(.*) ]]\n    ret=\"${BASH_REMATCH[2]}\"\n    return 0\n}\n\n_shournal_print_current_cmd(){\n    local cmd_str\n    _shournal_get_current_cmd_bash cmd_str\n    printf '%s\\n' \"$cmd_str\"\n}\n\n_shournal_refresh_current_pid(){\n    _shournal_current_pid=$BASHPID\n    return 0\n}\n\n_shournal_sh_is_interactive(){\n    [[ $- == *i* ]] && return 0\n    return 1\n}\n\n\n# Return true, if we are allowed to reexec, which we do in case\n# of non-interactive ssh commands (for the fanotify backend), where the\n# BASH_EXECUTION_STRING is always set, or in case of cluster jobs, which\n# are usually invoked as login_shell.\n# Re-exec is not allowed, if the a command\n# within the -c '..' arg was already executed\n# (it would be executed twice otherwise).\n_shournal_verbose_reexec_allowed(){\n    local i sourced_from_bashrc\n    if [[ -z ${BASH_EXECUTION_STRING+x} ]] && ! shopt -q login_shell; then\n        return 1\n    fi\n    # Only consider this a running cluster job, if we are sourced from .bashrc.\n    # FIXME: this is not robust, bash -c 'echo foo; source ~/.bashrc'\n    # should _not_ be allowed.\n    if [[ -z ${BASH_SOURCE+x} ]]; then\n        _shournal_error \"BASH_SOURCE is not set. 
Something is seriously\" \\\n            \"wrong here, aborting...\"\n        return 1\n    fi\n    sourced_from_bashrc=false\n    for ((i=1; i<${#BASH_SOURCE[@]}; i++)); do\n        if [[ \"${BASH_SOURCE[i]##*/}\" == .bashrc ]]; then\n            sourced_from_bashrc=true\n            break\n        fi\n    done\n\n    if [[ $sourced_from_bashrc == false ]]; then\n        _shournal_warn \"The command was considered for re-execution, but\" \\\n                       \"we require to be sourced from .bashrc for SHOURNAL_ENABLE.\" \\\n                       \"Alternatively, invoke »shournal -e« directly.\"\n        return 1\n    fi\n    return 0\n}\n\n;;   # END_OF bash\n    'zsh')\n\n_shournal_verbose_history_check(){\n    # no history needed if running non-interactively\n    [ -n \"${ZSH_EXECUTION_STRING+x}\" ] && return 0\n\n    # While in bash we retrieve the command-string\n    # from history, in zsh it is directly passed\n    # to our preexec_function, which seems to work\n    # regardless of history options like e.g.\n    # $ setopt HIST_IGNORE_SPACE\n    return 0\n}\n\n\n_shournal_print_current_cmd(){\n    printf '%s\\n' \"$history[$HISTCMD]\"\n}\n\n_shournal_sh_is_interactive(){\n    [[ -o interactive ]] && return 0\n    return 1\n}\n\n_shournal_verbose_reexec_allowed(){\n    local toplevel_context\n    if [[ -z ${ZSH_EXECUTION_STRING+x} && ! 
-o login ]]; then\n        return 1\n    fi\n    zmodload zsh/parameter\n    toplevel_context=\"${zsh_eval_context[1]}\"\n    case \"$toplevel_context\" in\n    file) :;;\n    cmdarg)\n        _shournal_warn \"eval-toplevel-context $toplevel_context not allowed\"\n        return 1;;\n    *)\n        _shournal_warn \"unhandled eval-toplevel-context $toplevel_context.\" \\\n                       \"Please report if you\" \\\n                       \"think that SHOURNAL_ENABLE should be possible here.\"\n        return 1;;\n    esac\n    return 0\n}\n\n;;  # END_OF zsh\n  *)\n    echo \"shournal shell integration: something is seriously wrong, \" \\\n         \"_SHOURNAL_SHELL_NAME is not correctly setup\" >&2\n;;\nesac\n"
  },
  {
    "path": "src/CMakeLists.txt",
    "content": "\n\ninclude_directories(\n    ../extern\n    ../kernel\n    common/\n    common/database\n    common/oscpp\n    common/qoptargparse\n    common/qsimplecfg\n    common/util\n    )\n\nadd_subdirectory(\"common\")\nadd_subdirectory(\"shournal\")\n\nif(${SHOURNAL_EDITION} MATCHES \"full|docker|ko\")\n    add_subdirectory(\"shournal-run\")\nendif()\nif(${SHOURNAL_EDITION} MATCHES \"full|docker|fanotify\")\n    add_subdirectory(\"shournal-run-fanotify\")\n    add_subdirectory(\"shell-integration-fanotify\")\nendif()\n\n"
  },
  {
    "path": "src/common/CMakeLists.txt",
    "content": "\nadd_subdirectory(util)\nadd_subdirectory(oscpp)\nadd_subdirectory(qoptargparse)\nadd_subdirectory(qsimplecfg)\n\nSET(lib_shournal_common_files\n    app.cpp\n    cefd.cpp\n    console_dialog.cpp\n    cxxhash.cpp\n    fdcommunication.cpp\n    fileeventhandler.cpp\n    fileevents.cpp\n    generic_container.h\n    groupcontrol.cpp\n    hashcontrol.cpp\n    hashmeta.cpp\n    idmapentry.h\n    interrupt_handler.cpp\n    logger.cpp\n    limited_priority_queue.h\n    pidcontrol.cpp\n    pathtree.cpp\n    qfddummydevice.cpp\n    qfilethrow.cpp\n    qresource_helper.cpp\n    safe_file_update.h\n    settings.cpp\n    shournal_run_common.cpp\n    stdiocpp.cpp\n    stupidinject.cpp\n    socket_message.cpp\n    subprocess.cpp\n    user_kernerl.h\n    xxhash_common.h\n    xxhash_common.c\n)\n\nset(database_files\n    database/db_connection.cpp\n    database/db_controller.cpp\n    database/sqlite_database_scheme.cpp\n    database/commandinfo.cpp\n    database/sessioninfo.cpp\n    database/fileinfos.cpp\n    database/sqlquery.cpp\n    database/file_query_helper.cpp\n    database/insertifnotexist.cpp\n    database/query_columns.h\n    database/db_conversions.cpp\n    database/sqlite_database_scheme_updates.cpp\n    database/storedfiles.cpp\n    database/db_globals.cpp\n    database/command_query_iterator.cpp\n    database/qexcdatabase.cpp\n    database/qsqlquerythrow.cpp\n)\n\nadd_library(lib_shournal_common\n    ${lib_shournal_common_files}\n    ${database_files}\n)\n\ntarget_link_libraries(lib_shournal_common PUBLIC\n    Qt5::Core\n    Qt5::Sql\n    Qt5::Network\n    xxhash\n    uuid\n    ${CMAKE_DL_LIBS}\n    cap\n    lib_util\n    oscpp_lib\n    lib_qoptargparse\n    lib_qsimplecfg\n)\n"
  },
  {
    "path": "src/common/app.cpp",
    "content": "\n#include <cstdlib>\n\n#include <QString>\n#include <QCoreApplication>\n#include <QStandardPaths>\n\n#include \"qoutstream.h\"\n#include \"app.h\"\n#include \"util.h\"\n#include \"osutil.h\"\n\n// may be supplied at buildtime, else should be defined in cmake file\n#ifndef SHOURNAL_MSENTERGROUP\nstatic_assert (false, \"SHOURNAL_MSENTERGROUP not defined\");\n#endif\n\n\nconst char* app::CURRENT_NAME = \"UNDEFINED\";\nconst char* app::SHOURNAL = \"shournal\";\nconst char* app::SHOURNAL_RUN = \"shournal-run\";\nconst char* app::SHOURNAL_RUN_FANOTIFY = \"shournal-run-fanotify\";\n// groupnames should be smaller than 16 characters (portability).\nconst char* app::MSENTER_ONLY_GROUP = SHOURNAL_MSENTERGROUP; // defined in cmake\nconst char* app::ENV_VAR_SOCKET_NB = \"_SHOURNAL_SOCKET_NB\";\n\nconst std::unordered_set<QString> &app::VERBOSITIES = {\"dbg\",\n                                            #if QT_VERSION >= QT_VERSION_CHECK(5, 5, 0)\n                                                \"info\",\n                                            #endif\n                                                \"warning\", \"critical\"};\n\n\nstatic bool g_inIntegrationTestMode=false;\n\n\nvoid app::setupNameAndVersion(const char* currentName)\n{\n    app::CURRENT_NAME = currentName;\n    QIErr::setPreambleCallback([]() { return QString(app::CURRENT_NAME) + \": \"; });\n\n    g_inIntegrationTestMode = getenv(\"_SHOURNAL_IN_INTEGRATION_TEST_MODE\") != nullptr;\n    QString integrationSuffix;\n\n    if(g_inIntegrationTestMode){\n        integrationSuffix = \"-integration-test\";\n        QStandardPaths::setTestModeEnabled(true);\n        // QIErr() << \"running in integration test mode\";\n    }\n    QCoreApplication::setApplicationName(app::SHOURNAL + integrationSuffix);\n    QCoreApplication::setApplicationVersion(app::version().toString());\n\n}\n\nbool app::inIntegrationTestMode()\n{\n    return g_inIntegrationTestMode;\n}\n\nint app::findIntegrationTestFd(){\n    
if(! app::inIntegrationTestMode()){\n        return -1;\n    }\n    QByteArray fdStr = getenv(\"_SHOURNAL_INTEGRATION_TEST_PIPE_FD\");\n    if(fdStr.isNull()){\n        QIErr() << \"Although in integration-test, cannot\"\n                      \"find pipe fd in env!\";\n        return -1;\n    }\n    int fd = qVariantTo_throw<int>(fdStr);\n    if(! osutil::fdIsOpen(fd)){\n        QIErr() << \"_SHOURNAL_INTEGRATION_TEST_PIPE_FD is not open - number:\"\n                << fd;\n        return -1;\n    }\n    return fd;\n}\n\nconst QVersionNumber &app::version()\n{\n    // defined in cmake\n    static const QVersionNumber v = QVersionNumber::fromString(SHOURNAL_VERSION);\n    return v;\n}\n\nconst QVersionNumber &app::initialVersion()\n{\n    static const QVersionNumber v = QVersionNumber{0, 1}; // first version ever;\n    return v;\n}\n\n\n"
  },
  {
    "path": "src/common/app.h",
    "content": "#pragma once\n\n#include <QVersionNumber>\n#include <unordered_set>\n\nnamespace app {\n\nconst extern char* CURRENT_NAME;\nconst extern char* SHOURNAL;\nconst extern char* SHOURNAL_RUN;\nconst extern char* SHOURNAL_RUN_FANOTIFY;\nconst extern char* MSENTER_ONLY_GROUP;\nconst extern char* ENV_VAR_SOCKET_NB;\n\nconst extern std::unordered_set<QString> &VERBOSITIES;\n\nvoid setupNameAndVersion(const char *currentName);\nbool inIntegrationTestMode();\nint findIntegrationTestFd();\n\nconst QVersionNumber& version();\nconst QVersionNumber& initialVersion();\n\n\n}\n\n\n\n"
  },
  {
    "path": "src/common/cefd.cpp",
    "content": "\n#include <sys/eventfd.h>\n\n#include \"cefd.h\"\n#include \"os.h\"\n#include \"excos.h\"\n#include \"osutil.h\"\n\n\nCEfd::CEfd()\n{\n    m_fd = eventfd(0, EFD_CLOEXEC);\n    if (m_fd == -1){\n        throw os::ExcOs(\"eventfd failed\");\n    }\n}\n\nCEfd::~CEfd()\n{\n    teardown();\n}\n\nvoid CEfd::sendMsg(uint64_t n)\n{\n    os::write(m_fd, &n, sizeof (n));\n}\n\nuint64_t CEfd::recvMsg()\n{\n    uint64_t n;\n    // Block until parent process did the setup\n    if(os::read(m_fd, &n, sizeof(n)) != sizeof(n)){\n        throw os::ExcOs(\"cefd: read wrong size.\");\n    }\n    return n;\n}\n\nvoid CEfd::teardown()\n{\n    if(m_fd != -1){\n        osutil::closeVerbose(m_fd);\n        m_fd = -1;\n    }\n}\n"
  },
  {
    "path": "src/common/cefd.h",
    "content": "\n#pragma once\n\n#include <cstdint>\n\n#include \"util.h\"\n\nclass CEfd {\npublic:\n    static const uint64_t MSG_OK {7};\n    static const uint64_t MSG_FAIL {8};\n\n    CEfd();\n    ~CEfd();\n\n    void sendMsg(uint64_t n);\n    uint64_t recvMsg();\n\n    void teardown();\n\n\nprivate:\n    Q_DISABLE_COPY(CEfd)\n    DISABLE_MOVE(CEfd)\n\n    int m_fd;\n};\n"
  },
  {
    "path": "src/common/console_dialog.cpp",
    "content": "\n#include <cstdlib>\n#include <QTextStream>\n#include <QStandardPaths>\n\n\n#include \"compat.h\"\n#include \"console_dialog.h\"\n#include \"qoutstream.h\"\n#include \"util.h\"\n#include \"subprocess.h\"\n\nusing subprocess::Subprocess;\n\n/// Ask a simple yesno-question and return the result.\n/// @returns true, if \"y\", false if \"n\" was entered\nbool console_dialog::yesNo(const QString &question)\n{\n    const QString yesStr = qtr(\"y\");\n    const QString noStr = qtr(\"n\");\n    QOut() << QString(\"%1 (%2/%3) \").arg(question, yesStr, noStr);\n\n    QTextStream input(stdin);\n    while (true) {\n        QString respone = input.readLine();\n        if(respone.compare(yesStr, Qt::CaseSensitivity::CaseInsensitive) == 0){\n            return true;\n        }\n        if(respone.compare(noStr, Qt::CaseSensitivity::CaseInsensitive) == 0){\n            return false;\n        }\n        QOut() << qtr(\"Please enter %1 or %2\").arg(yesStr, noStr) << \"\\n\";\n    }\n\n}\n\n/// Open filepath within the users favourite editor,\n/// exported in environment variable EDITOR. If not set, try\n/// to find a typical editor such as nano, vim,...\n/// @return return value of the launched process. 
In case it did'nt exit normally,\n///         an os-exception is thrown,\n/// @throws QExcIo, os::ExcOs\nint console_dialog::openFileInExternalEditor(const QString &filepath)\n{\n\n    QString editor = getenv(\"EDITOR\");\n    subprocess::Args_t args;\n    if(editor.isEmpty()){\n        if((editor=QStandardPaths::findExecutable(\"nano\")).isEmpty())\n            if((editor=QStandardPaths::findExecutable(\"vim\")).isEmpty())\n                if((editor=QStandardPaths::findExecutable(\"vi\")).isEmpty()){\n                    throw QExcIo(qtr(\"No texteditor found, please set EDITOR \"\n                                     \"environment variable.\"));\n                }\n        args.push_back(editor.toStdString());\n    } else {\n        // support also EDITOR-strings like e.g. 'geany -i' -> if we cannot find\n        // the executable, try to split by space\n        if((QStandardPaths::findExecutable(editor)).isEmpty() ){\n            const auto splitted = editor.split(' ', Qt::SkipEmptyParts);\n            if(splitted.length() > 1){\n                for(const QString& s : splitted){\n                    args.push_back(s.toStdString());\n                }\n            } else {\n                // let it (probably) fail below:\n                args.push_back(editor.toStdString());\n            }\n        } else {\n            args.push_back(editor.toStdString());\n        }\n    }\n    args.push_back(filepath.toStdString());\n    Subprocess subproc;\n    subproc.call(args);\n    return subproc.waitFinish();\n}\n"
  },
  {
    "path": "src/common/console_dialog.h",
    "content": "#pragma once\n\n#include <QString>\n\nnamespace console_dialog {\n\nbool yesNo(const QString& question);\n\nint openFileInExternalEditor(const QString& filepath);\n\n}\n"
  },
  {
    "path": "src/common/cxxhash.cpp",
    "content": "\n#include <cassert>\n\n#include \"cxxhash.h\"\n#include <iostream>\n\n#include \"excos.h\"\n#include \"os.h\"\n\n\nCXXHash::CXXHash() :\n    m_pXXState(XXH64_createState())\n{\n    assert(m_pXXState != nullptr);\n    m_buf.resize(sysconf(_SC_PAGESIZE));\n}\n\nCXXHash::~CXXHash()\n{\n    XXH64_freeState(m_pXXState);\n}\n\nvoid CXXHash::resizeBuf(size_t n)\n{\n    assert(n > 0);\n    m_buf.resize(n);\n}\n\n/// @throws CXXHashError\nvoid CXXHash::reset(unsigned long long seed)\n{\n    XXH_errorcode err=XXH64_reset(m_pXXState, seed);\n    if(err == XXH_ERROR ){\n        throw ExcCXXHash(\"reset failed\", err);\n    }\n}\n\n/// @throws CXXHashError\nvoid CXXHash::update(const void *buffer, size_t len)\n{\n    XXH_errorcode err = XXH64_update(m_pXXState, buffer, len);\n    if(err == XXH_ERROR ){\n        throw ExcCXXHash(\"update failed\", err);\n    }\n}\n\nstruct partial_xxhash_result CXXHash::digestWholeFile(int fd, int chunksize)\n{\n    return this->digestFile(fd, chunksize, 0, std::numeric_limits<int>::max());\n}\n\npartial_xxhash_result CXXHash::digestFile(int fd, int chunksize,\n                                off64_t seekstep, int maxCountOfReads)\n{\n    struct partial_xxhash part_hash;\n    part_hash.xxh_state = m_pXXState;\n    part_hash.chunksize = chunksize;\n    part_hash.seekstep = seekstep;\n    part_hash.max_count_of_reads = maxCountOfReads;\n    part_hash.buf = m_buf.data();\n    part_hash.bufsize = m_buf.size();\n\n    struct partial_xxhash_result res;\n    auto ret = partial_xxh_digest_file(fd, &part_hash, &res);\n    if(ret != 0){\n        throw ExcCXXHash(\"digest failed: \", int(ret));\n    }\n    return res;\n}\n\n\n/*\n/// XXHASH-digest a whole file or parts of it at regular intervals.\n/// @param fd the fildescriptor of the file. Note that in general you would want\n///           to make sure, that the offset is at 0. 
Note that the offset\n///           may be changed during the call.\n/// @param chunksize size of the chunks to read at once.\n/// @param seekstep Read chunks from the file every seekstep bytes. The read chunk\n///                 does not count into this, so if you actually want to skip bytes,\n///                 seekstep must be greater than chunksize. Otherwise NO SEEK is\n///                 performed at all.\n/// @param maxCountOfReads stop reading and digest after that count of 'read'-\n///                        operations.\n/// @returns the calculated hash and the actual count of bytes read.\n///          If the count of bytes is zero, the hash is invalid.\n/// @throws ExcOs, CXXHashError\nCXXHash::DigestResult CXXHash::digestFile(int fd, int chunksize,\n                                off64_t seekstep, int maxCountOfReads)\n{\n    /// Implementation detail:\n    /// Calling XXH64_update introduces some overhead, which can be avoided by\n    /// calling XXH64() directly with a sufficiently large buffer.\n    /// So, if our buffer is large enough, read the chunks from file\n    /// one by one into our own buffer. 
If it's full, call XXH64_update,\n    /// else do it alltogether at the end.\n\n    assert(maxCountOfReads > 0);\n    assert(chunksize > 0);\n    if(chunksize > int(m_buf.size())){\n        m_buf.resize(chunksize);\n    }\n    const bool doSeek = seekstep > chunksize;\n\n    DigestResult res;\n    off64_t offset=0;\n    res.countOfbytes = 0;\n    char* bufRaw = m_buf.data();\n    char* bufRawEnd = bufRaw + m_buf.size();\n    bool updateNecessary = false;\n    for(int countOfReads=0; countOfReads < maxCountOfReads ; ++countOfReads) {\n        ssize_t readBytes = os::read(fd, bufRaw, static_cast<size_t>(chunksize));\n        bufRaw += readBytes;\n        res.countOfbytes += readBytes;\n        if(readBytes < chunksize) {\n            break; // EOF\n        }\n        if(bufRawEnd - bufRaw <= chunksize){\n            // not enough space for another read: flush buffer\n            if(! updateNecessary){\n                updateNecessary = true;\n                this->reset(0);\n            }\n            this->update(m_buf.data(), bufRaw - m_buf.data());\n            bufRaw = m_buf.data();\n        }\n        if( doSeek ) {\n            offset += seekstep;\n            os::lseek(fd, offset, SEEK_SET);\n        }\n    }\n    if(res.countOfbytes == 0){\n        res.hash = 0;\n        return res;\n    }\n    // we read something\n    if(updateNecessary){\n        if(bufRaw != m_buf.data()){\n            this->update(m_buf.data(), bufRaw - m_buf.data());\n        }\n        res.hash = XXH64_digest(m_pXXState);\n        return res;\n    }\n    // No update was needed (all chunks fitted into buffer). 
Flush the whole buffer at once without\n    // xxhash state overhead (update/reset)\n    assert(bufRaw != m_buf.data());\n    res.hash = XXH64(m_buf.data(), bufRaw - m_buf.data(), 0 );\n    return res;\n}\n*/\n\n\nCXXHash::ExcCXXHash::ExcCXXHash(const std::string &msg, int errorcode) :\n    m_errorcode(errorcode) {\n    m_descrip = \"XXHashError occurred: \" +\n            msg + \" - errorcode: \" +\n            std::to_string(m_errorcode) ;\n}\n\n\nconst char *CXXHash::ExcCXXHash::what() const noexcept {\n    return m_descrip.c_str();\n}\n"
  },
  {
    "path": "src/common/cxxhash.h",
    "content": "#pragma once\n\n\n#include <cstddef>\n#include <unistd.h>\n#include <string>\n#include <limits>\n\n#include \"xxhash.h\"\n#include \"strlight.h\"\n#include \"xxhash_common.h\"\n\n\n/// A cpp interface around the needed c-functions of XXHASH and\n/// some other methods (digestFile).\n/// For further documentation of the wrapper-only-functions please head to the\n/// documentation of the c-api.\nclass CXXHash\n{\npublic:\n    class ExcCXXHash : public std::exception\n    {\n    public:\n        explicit ExcCXXHash(const std::string & msg, int errorcode);\n        const char *what () const noexcept override;\n\n    private:\n        std::string m_descrip;\n        int m_errorcode;\n    };\n\n\n    CXXHash();\n    ~CXXHash();\n\n    void resizeBuf(size_t n);\n\n    struct partial_xxhash_result digestWholeFile(int fd, int chunksize);\n    struct partial_xxhash_result digestFile(int fd, int chunksize, off64_t seekstep,\n                            int maxCountOfReads=std::numeric_limits<int>::max());\n\npublic:\n    CXXHash(const CXXHash&) = delete;\n    void operator=(const CXXHash&) = delete;\n\nprivate:\n    void reset(unsigned long long seed=0);\n    void update(const void* buffer, size_t len);\n\n    XXH64_state_t * const m_pXXState;\n    StrLight m_buf;\n};\n\n\n\n\n"
  },
  {
    "path": "src/common/database/command_query_iterator.cpp",
    "content": "\n#include <QDebug>\n#include \"command_query_iterator.h\"\n#include \"util.h\"\n#include \"db_connection.h\"\n#include \"db_conversions.h\"\n#include \"db_controller.h\"\n\n\n///  @param reverseIter: if true, instead of calling next(), previous() will be called\n/// on the passed query.\nCommandQueryIterator::CommandQueryIterator(std::shared_ptr<QSqlQueryThrow>& query, bool reverseIter) :\n    m_cmdQuery(query),\n    m_tmpQuery(db_connection::mkQuery()),\n    m_reverseIter(reverseIter)\n{\n}\n\n// set cursor to next or previous, if reverseIter was set on constructor\nbool CommandQueryIterator::next()\n{\n    m_cmd.clear();\n    const bool nextRet = (m_reverseIter) ? m_cmdQuery->previous() : m_cmdQuery->next();\n    if(nextRet){\n        fillCommand();\n    }\n    return nextRet;\n}\n\nCommandInfo &CommandQueryIterator::value()\n{\n    return m_cmd;\n}\n\nint CommandQueryIterator::computeSize()\n{\n    return m_cmdQuery->computeSize();\n}\n\n\n\nvoid CommandQueryIterator::fillCommand()\n{\n    int i=0;\n    m_cmd.idInDb = qVariantTo_throw<qint64>(m_cmdQuery->value(i++));\n    m_cmd.text = m_cmdQuery->value(i++).toString();\n    m_cmd.returnVal = m_cmdQuery->value(i++).toInt();\n    m_cmd.startTime = m_cmdQuery->value(i++).toDateTime();\n    m_cmd.endTime = m_cmdQuery->value(i++).toDateTime();\n    m_cmd.workingDirectory = m_cmdQuery->value(i++).toString();\n\n    m_cmd.sessionInfo.uuid = m_cmdQuery->value(i++).toByteArray();\n    m_cmd.sessionInfo.comment = m_cmdQuery->value(i++).toString();\n\n    QVariant hashChunksize = m_cmdQuery->value(i++);\n    if(! 
hashChunksize.isNull()){\n        qVariantTo_throw(hashChunksize, &m_cmd.hashMeta.chunkSize) ;\n        qVariantTo_throw(m_cmdQuery->value(i++), &m_cmd.hashMeta.maxCountOfReads);\n    } else {\n        i++;\n    }\n    m_cmd.username = m_cmdQuery->value(i++).toString();\n    m_cmd.hostname = m_cmdQuery->value(i++).toString();\n\n    fillWrittenFiles();\n    m_cmd.fileReadInfos = db_controller::queryReadInfos_byCmdId(m_cmd.idInDb);\n}\n\nvoid CommandQueryIterator::fillWrittenFiles()\n{\n    m_tmpQuery->prepare(\"select writtenFile.id,writtenFile_path.path,writtenFile.name,\"\n                        \"writtenFile.mtime,writtenFile.size,writtenFile.hash \"\n                        \"from writtenFile \"\n                        \"join pathtable as writtenFile_path \"\n                        \"on writtenFile.pathId=writtenFile_path.id \"\n                        \"where cmdId=?\");\n    m_tmpQuery->addBindValue(m_cmd.idInDb);\n    m_tmpQuery->exec();\n    while(m_tmpQuery->next()){\n        int i=0;\n        FileWriteInfo fInfo;\n        fInfo.idInDb = qVariantTo_throw<qint64>(m_tmpQuery->value(i++));\n        fInfo.path = m_tmpQuery->value(i++).toString();\n        fInfo.name = m_tmpQuery->value(i++).toString();\n        fInfo.mtime = m_tmpQuery->value(i++).toDateTime();\n        fInfo.size =  qVariantTo_throw<qint64>(m_tmpQuery->value(i++));\n        fInfo.hash = db_conversions::toHashValue(m_tmpQuery->value(i++));\n        m_cmd.fileWriteInfos.push_back(fInfo);\n    }\n}\n\n\n\n"
  },
  {
    "path": "src/common/database/command_query_iterator.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"qsqlquerythrow.h\"\n#include \"commandinfo.h\"\n#include \"db_connection.h\"\n\nclass CommandQueryIterator\n{\npublic:\n    CommandQueryIterator(std::shared_ptr<QSqlQueryThrow> &query, bool reverseIter);\n\n    bool next();\n\n    CommandInfo& value();\n\n    int computeSize();\n\npublic:\n    CommandQueryIterator(const CommandQueryIterator &) = delete ;\n    void operator=(const CommandQueryIterator &) = delete ;\n\nprivate:\n\n    void fillCommand();\n    void fillWrittenFiles();\n\n    std::shared_ptr<QSqlQueryThrow> m_cmdQuery;\n    QueryPtr m_tmpQuery;\n    CommandInfo m_cmd;\n    bool m_reverseIter;\n};\n\n"
  },
  {
    "path": "src/common/database/commandinfo.cpp",
    "content": "\n#include <QHostInfo>\n#include <QString>\n#include <QJsonArray>\n\n#include \"commandinfo.h\"\n\n#include \"os.h\"\n#include \"settings.h\"\n#include \"db_globals.h\"\n#include \"conversions.h\"\n\n/// Settings must be loaded beforehand!\n/// Fill commandInfo with those information independent from the current\n/// command. The following properties yet *have* to be set:\n/// startTime, endTime, text\n/// The following *may* be set:\n/// returnVal\nCommandInfo CommandInfo::fromLocalEnv()\n{\n    CommandInfo cmd;\n\n    cmd.hostname = QHostInfo::localHostName();\n    cmd.username = os::getUserName<QString>();\n    // Do not:\n    // cmd.workingDirectory = QDir::currentPath();\n    // If the working directory is deleted, this returns a null string, which is not very\n    // informative (and also not allowed in the database scheme). Using below approach returns\n    // a valid string with a trailing ' (deleted)', if appropriate.\n    cmd.workingDirectory = QString::fromLocal8Bit(os::readlink<QByteArray>(\"/proc/self/cwd\"));\n    auto & sets = Settings::instance();\n    if(sets.hashSettings().hashEnable){\n        cmd.hashMeta = sets.hashSettings().hashMeta;\n    }\n    return cmd;\n}\n\nCommandInfo::CommandInfo()\n    : idInDb(db::INVALID_INT_ID),\n      text(\"\"), // empty string, so QString.isNull() returns false -> no null-inserts into database\n      returnVal(INVALID_RETURN_VAL)\n{}\n\nvoid CommandInfo::write(QJsonObject &json, bool withMilliseconds,\n                        const CmdJsonWriteCfg &writeCfg) const\n{\n    if(writeCfg.idInDb) json[\"id\"] = idInDb;\n    if(writeCfg.text) json[\"command\"] = text;\n    if(writeCfg.returnVal) json[\"returnValue\"] = returnVal;\n    if(writeCfg.username) json[\"username\"] = username;\n    if(writeCfg.hostname) json[\"hostname\"] = hostname;\n\n    if(writeCfg.hashMeta) {\n        QJsonValue hashChunkSize;\n        QJsonValue hashMaxCountOfReads;\n        if(! 
hashMeta.isNull()){\n            hashChunkSize = hashMeta.chunkSize;\n            hashMaxCountOfReads = hashMeta.maxCountOfReads;\n        }\n        json[\"hashChunkSize\"] = hashChunkSize;\n        json[\"hashMaxCountOfReads\"] = hashMaxCountOfReads;\n    }\n\n    // A null-session-QString becomes a quoted string in json, instead of null, so below\n    // effort is necessary (invalid session should always be null: in database, shournal and js-plot...).\n    if(writeCfg.sessionInfo){\n        json[\"sessionUuid\"] = (sessionInfo.uuid.isNull()) ? QJsonValue() :\n                                                        QString::fromLatin1(sessionInfo.uuid.toBase64());\n    }\n\n    if(withMilliseconds){\n        if(writeCfg.startEndTime){\n            json[\"startTime\"] = startTime.toString(Conversions::dateIsoFormatWithMilliseconds());\n            json[\"endTime\"] = endTime.toString(Conversions::dateIsoFormatWithMilliseconds());\n        }\n    } else {\n        if(writeCfg.startEndTime){\n            json[\"startTime\"] = QJsonValue::fromVariant(startTime);\n            json[\"endTime\"] = QJsonValue::fromVariant(endTime);\n        }\n    }\n    if(writeCfg.workingDirectory) json[\"workingDir\"] = workingDirectory;\n\n    if(writeCfg.fileReadInfos){\n        QJsonArray fReadArr;\n        int idx = 0;\n        for(const auto& info : fileReadInfos){\n\n            QJsonObject fReadObj;\n            info.write(fReadObj);\n            fReadObj[\"status\"] = (writeCfg.fileStatus) ? 
info.currentStatus(*this) : \"NA\";\n            fReadArr.append(fReadObj);\n            ++idx;\n            if(idx >= writeCfg.maxCountRFiles){\n                break;\n            }\n        }\n        json[\"fileReadEvents\"] = fReadArr;\n    }\n\n    if(writeCfg.fileWriteInfos){\n        QJsonArray fWriteArr;\n        int idx = 0;\n        for(const auto& info : fileWriteInfos){\n            QJsonObject fWObject;\n            info.write(fWObject);\n            fWObject[\"status\"] = (writeCfg.fileStatus) ? info.currentStatus(*this) : \"NA\";\n            fWriteArr.append(fWObject);\n            ++idx;\n            if(idx >= writeCfg.maxCountWFiles){\n                break;\n            }\n        }\n        json[\"fileWriteEvents\"] = fWriteArr;\n    }\n\n}\n\nbool CommandInfo::operator==(const CommandInfo &rhs) const\n{\n    if(idInDb != db::INVALID_INT_ID && rhs.idInDb != db::INVALID_INT_ID){\n        return idInDb == rhs.idInDb;\n    }\n\n    return text == rhs.text &&\n           returnVal == rhs.returnVal &&\n           username == rhs.username &&\n           hostname == rhs.hostname &&\n           hashMeta == rhs.hashMeta &&\n           sessionInfo == rhs.sessionInfo &&\n           fileWriteInfos == rhs.fileWriteInfos &&\n           fileReadInfos == rhs.fileReadInfos &&\n           startTime == rhs.startTime &&\n           endTime == rhs.endTime &&\n           workingDirectory == rhs.workingDirectory;\n}\n\nvoid CommandInfo::clear()\n{\n    fileWriteInfos.clear();\n    fileReadInfos.clear();\n    idInDb = db::INVALID_INT_ID;\n}\n\n\n\n"
  },
  {
    "path": "src/common/database/commandinfo.h",
    "content": "#pragma once\n\n#include <QString>\n#include <QVector>\n#include <QJsonObject>\n\n#include \"hashmeta.h\"\n#include \"sessioninfo.h\"\n#include \"fileinfos.h\"\n\n\ntypedef QVector<FileWriteInfo> FileWriteInfos;\ntypedef QVector<FileReadInfo> FileReadInfos;\n\n/// Configure which fields shall be written\n/// to JSON on CommandInfo.write() (and how many\n/// entries of some fields)\nstruct CmdJsonWriteCfg {\n    CmdJsonWriteCfg(bool initAll) :\n        idInDb(initAll),\n        text(initAll),\n        returnVal(initAll),\n        username(initAll),\n        hostname(initAll),\n        hashMeta(initAll),\n        sessionInfo(initAll),\n        startEndTime(initAll),\n        workingDirectory(initAll),\n        fileWriteInfos(initAll),\n        fileReadInfos(initAll)\n    {}\n\n    bool idInDb;\n    bool text;\n    bool returnVal;\n    bool username;\n    bool hostname;\n    bool hashMeta;\n    bool sessionInfo;\n\n    bool startEndTime;\n    bool workingDirectory;\n\n    bool fileWriteInfos;\n    bool fileReadInfos;\n\n    int maxCountWFiles{std::numeric_limits<int>::max()};\n    int maxCountRFiles{std::numeric_limits<int>::max()};\n\n    bool fileStatus{false};\n};\n\nstruct CommandInfo\n{\n    // Invalid return value set, if no return value could be determined (e.g. 
because\n    // the shell-process called execve() before fork)\n    static const qint32 INVALID_RETURN_VAL = std::numeric_limits<qint32>::max();\n\n    static CommandInfo fromLocalEnv();\n\n    CommandInfo();\n    qint64 idInDb;\n    QString text;\n    qint32 returnVal;\n    QString username;\n    QString hostname;\n    HashMeta hashMeta;\n    SessionInfo sessionInfo;\n\n    QDateTime startTime;\n    QDateTime endTime;\n    QString workingDirectory;\n\n    FileWriteInfos fileWriteInfos;\n    FileReadInfos fileReadInfos;\n\n    void write(QJsonObject &json, bool withMilliseconds=false,\n               const CmdJsonWriteCfg& writeCfg=CmdJsonWriteCfg(true)) const;\n\n    bool operator==(const CommandInfo& rhs) const;\n\n    void clear();\n\n};\n"
  },
  {
    "path": "src/common/database/db_connection.cpp",
    "content": "\n#include <cassert>\n\n#include <QSqlDatabase>\n#include <QSqlQuery>\n#include <QSqlError>\n#include <QStandardPaths>\n#include <QSqlDriver>\n#include <QFileInfo>\n#include <QDir>\n#include <unistd.h>\n\n#include \"compat.h\"\n#include \"db_connection.h\"\n#include \"cflock.h\"\n#include \"sqlite_database_scheme.h\"\n#include \"sqlite_database_scheme_updates.h\"\n#include \"qexcdatabase.h\"\n#include \"qfilethrow.h\"\n#include \"qsqlquerythrow.h\"\n#include \"logger.h\"\n#include \"app.h\"\n#include \"util.h\"\n#include \"staticinitializer.h\"\n#include \"settings.h\"\n\nstatic QSqlDatabase* g_db = nullptr;\n\nstatic bool versionTableExists(QSqlQueryThrow& query){\n    logDebug << \"checking for version table...\";\n    query.exec(\"SELECT name FROM sqlite_master WHERE type='table' AND name='version'\");\n    return query.next();\n}\n\nstatic QVersionNumber queryVersion(QSqlQueryThrow& query){\n    query.exec(\"select ver from version\");\n    query.next(true);\n    return QVersionNumber::fromString(query.value(0).toString());\n}\n\nstatic void newSqliteDbIfNeeded(){\n    static StaticInitializer loader( [](){\n        // maybe_todo: according to documentation of QSqlDatabase, rather\n        // call QSqlDatabase::database() instead of storing the database\n        // ourselves.\n        g_db = new QSqlDatabase(QSqlDatabase::addDatabase(\"QSQLITE\"));\n        if(! g_db->isValid()){\n            throw QExcDatabase(qtr(\"Failed to add qt's sqlite database driver. \"\n                                   \"Is the driver installed?\"));\n        }\n        // give enough time, e.g. 
for cases where the db is stored on a nfs-drive.\n        g_db->setConnectOptions(\"QSQLITE_BUSY_TIMEOUT=15000\");\n    });\n}\n\nstatic void updateDbScheme(QSqlQueryThrow& query, const QVersionNumber& latestSchemeVer){\n    const auto dbVersion = queryVersion(query);\n    if(dbVersion == latestSchemeVer){\n        return;\n    }\n    if(dbVersion > latestSchemeVer){\n        logWarning << qtr(\"The database version (%1) is higher than the scheme version \"\n                          \"(%2). Note that downgrades of the database are *not* \"\n                          \"supported, so things may go wrong. Please update shournal \"\n                          \"(on this machine).\")\n                      .arg(dbVersion.toString()).arg(latestSchemeVer.toString());\n        return;\n    }\n    // the version is smaller -> perform all necessary updates\n\n    if(dbVersion < QVersionNumber{0, 9}){\n        logDebug << \"updating db to 0.9...\";\n        sqlite_database_scheme_updates::v0_9(query);\n    }\n    if(dbVersion < QVersionNumber{2, 1}){\n        logDebug << \"updating db to 2.1...\";\n        sqlite_database_scheme_updates::v2_1(query);\n    }\n\n    if(dbVersion < QVersionNumber{2, 2}){\n        logDebug << \"updating db to 2.2...\";\n        sqlite_database_scheme_updates::v2_2(query);\n    }\n\n    if(dbVersion < QVersionNumber{2, 4}){\n        logDebug << \"updating db to 2.4...\";\n        sqlite_database_scheme_updates::v2_4(query);\n    }\n\n    if(dbVersion < QVersionNumber{2, 5}){\n        logDebug << \"updating db to 2.5...\";\n        sqlite_database_scheme_updates::v2_5(query);\n    }\n\n    query.prepare(\"replace into version (id, ver) values (1, ?)\");\n    query.addBindValue(latestSchemeVer.toString());\n    query.exec();\n\n}\n\nstatic void\ncreateOrUpDateDb(QSqlQueryThrow &query, const QVersionNumber& latestSchemeVer){\n    logDebug << \"about to lockExclusive database for scheme update...\";\n    // quoting sqlite.org/foreignkeys.html\n    // 
\"It is not possible to enable or disable foreign key constraints in the\n    //  middle of a multi-statement transaction (when SQLite is not in autocommit mode)\"\n    // The scheme-updates require foreign_keys=OFF, so call below pragma:\n    query.exec(\"PRAGMA foreign_keys=OFF\");\n    QFileThrow lockfile(db_connection::getDatabaseDir() + \"/.shournal-dblock\");\n    lockfile.open(QFile::OpenModeFlag::ReadWrite);\n    // Lock exclusively on scheme update. Note that for some reason concurrent\n    // processes executing \"PRAGMA locking_mode=EXCLUSIVE; BEGIN EXCLUSIVE;\" deadlocked\n    // during integration tests, so be careful with that directive.\n    CFlock lock(lockfile.handle());\n    lock.lockExclusive();\n    query.transaction();\n\n    if(! versionTableExists(query)){\n        logInfo << qtr(\"Creating new sqlite database\");\n        QStringList statements = QString(\n                    SQLITE_DATABASE_SCHEME).split(';', Qt::SkipEmptyParts);\n        for(const QString& stmt : statements){\n            query.exec(stmt);\n        }\n\n        QFile dbDir(db_connection::getDatabaseDir());\n        if(! dbDir.setPermissions(\n             QFileDevice::ReadOwner|QFileDevice::WriteOwner|QFileDevice::ExeOwner)){\n            logWarning << qtr(\"Failed to initially set permissions on the database-\"\n                              \"directory at %1: %2. 
Other users might be able \"\n                              \"to browse your command history...\")\n                          .arg(db_connection::getDatabaseDir(), dbDir.errorString());\n        }\n    }\n    updateDbScheme(query, latestSchemeVer);\n\n    query.commit();\n    lock.unlock();\n    // outside of transaction (see above):\n    // Allow for delete queries with cascades\n    query.exec(\"PRAGMA foreign_keys=ON\");\n}\n\n/// @throws QExcDatabase\nstatic void openAndPrepareSqliteDb()\n{\n    const QString appDataLoc = db_connection::mkDbPath();\n    const QString dbPath = appDataLoc + \"/database.db\";\n    g_db->setDatabaseName(dbPath);\n    if(! g_db->open()) {\n        throw QExcDatabase(__func__, g_db->lastError());\n    }\n\n    // Until shournal v3.2 the database version was always set to the application version.\n    // This required a synchronized update of all machines sharing the same database.\n    // Therefore, only update the database version if a scheme update is necessary.\n    auto latestSchemeVer = QVersionNumber{3, 2};\n    QSqlQueryThrow query(*g_db);\n    if(! versionTableExists(query)){\n        logDebug << \"version table did not exist yet..\";\n        createOrUpDateDb(query, latestSchemeVer);\n    } else {\n        const auto dbVersion = queryVersion(query);\n        logDebug << \"current db-version\" << dbVersion.toString()\n                 << \"latestSchemeVer\" << latestSchemeVer.toString();\n        if(dbVersion != latestSchemeVer){\n            createOrUpDateDb(query, latestSchemeVer);\n        }\n    }\n\n    // Allow for delete queries with cascades\n    query.exec(\"PRAGMA foreign_keys=ON\");\n}\n\n\n\n\nQString db_connection::getDatabaseDir(){\n    auto & sets = Settings::instance();\n    return sets.dataDir();\n}\n\n/// @return the created dir\nQString db_connection::mkDbPath()\n{\n    const QString & appDataLoc = db_connection::getDatabaseDir();\n    QDir d(appDataLoc);\n    if( ! 
d.mkpath(appDataLoc)){\n        throw QExcIo(qtr(\"Failed to create the directory for the database at %1\")\n                             .arg(appDataLoc));\n    }\n    return  appDataLoc;\n}\n\n\n\nQueryPtr db_connection::mkQuery()\n{\n    setupIfNeeded();\n    return std::make_shared<QSqlQueryThrow>(*g_db);\n}\n\n/// merely for test purposes\nvoid db_connection::close()\n{\n    g_db->close();\n}\n\nvoid db_connection::setupIfNeeded()\n{\n    newSqliteDbIfNeeded();\n    if(! g_db->isOpen()){\n        openAndPrepareSqliteDb();\n    }\n}\n"
  },
  {
    "path": "src/common/database/db_connection.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"qsqlquerythrow.h\"\n\ntypedef std::shared_ptr<QSqlQueryThrow> QueryPtr;\n\nnamespace db_connection {\n\nQString getDatabaseDir();\nQString mkDbPath();\n\nvoid setupIfNeeded();\nQueryPtr mkQuery();\n\nvoid close();\n}\n\n"
  },
  {
    "path": "src/common/database/db_controller.cpp",
    "content": "\n#include <fcntl.h>\n#include <csignal>\n#include <QSqlDatabase>\n#include <QSqlRecord>\n#include <QSql>\n#include <QSqlDriver>\n#include <QDateTime>\n#include <cassert>\n\n#include \"db_controller.h\"\n#include \"db_connection.h\"\n#include \"db_conversions.h\"\n#include \"db_globals.h\"\n#include \"qexcdatabase.h\"\n#include \"qsqlquerythrow.h\"\n#include \"insertifnotexist.h\"\n#include \"query_columns.h\"\n#include \"logger.h\"\n#include \"util.h\"\n#include \"cleanupresource.h\"\n#include \"storedfiles.h\"\n#include \"interrupt_handler.h\"\n#include \"os.h\"\n#include \"qoutstream.h\"\n\nusing namespace db_conversions;\nusing db_controller::InsertIfNotExist;\n\n\nstatic void\ninsertFileWriteEvent(const QueryPtr& query, const CommandInfo &cmd,\n                FileEvent* e )\n{\n    auto pathFnamePair =  splitAbsPath(QString(e->path()));\n    query->prepare(query->insertIgnorePreamble() + \" into pathtable (path)\"\n                   \"values (?)\");\n    query->addBindValue(pathFnamePair.first);\n    query->exec();\n    query->prepare(query->insertIgnorePreamble() +\n                   \" into writtenFile (cmdId,pathId,name,mtime,size,hash) \"\n                   \"values (?,\"\n                   \"(select `id` from pathtable where path=?),\"\n                   \"?,?,?,?)\");\n\n    query->addBindValue(cmd.idInDb);\n    query->addBindValue(pathFnamePair.first);\n    query->addBindValue(pathFnamePair.second);\n    query->addBindValue(fromMtime(e->mtime()));\n    query->addBindValue(static_cast<qint64>(e->size()));\n    query->addBindValue(fromHashValue(e->hash()));\n    query->exec();\n}\n\n/// Move or copy the file captured along the read event e\n/// to the read files directory in shournal's database dir.\nstatic void\ncopyToStoredFiles(const FileEvent* e,\n                             const QByteArray& storedFilesDir,\n                             const QByteArray& idInDatabase){\n    const auto fullDestPath = 
pathJoinFilename(storedFilesDir, idInDatabase);\n    int out_fd = os::open(strDataAccess(fullDestPath), os::OPEN_WRONLY | os::OPEN_CREAT);\n    auto autoCloseOutFd = finally([&out_fd] { close(out_fd); });\n\n    try {\n        os::sendfile(out_fd, fileno_unlocked(e->file()),\n                     e->fileContentSize(),\n                     e->fileContentStart());\n    } catch (const os::ExcOs& ex) {\n        logWarning << QString(\"Failed to send file to %1 - %2\")\n                       .arg(fullDestPath.constData())\n                       .arg(ex.what());\n        throw;\n    }\n\n\n}\n\nstatic void\ninsertFileReadEvent(const QueryPtr& query, const CommandInfo &cmd,\n                    const QVariant& envId, const QVariant& hashMetaId,\n                    FileEvent* e )\n{\n    StoredFiles storedFiles;\n    const QByteArray storedFilesDir = storedFiles.getReadFilesDir().toUtf8();\n\n    const auto pathFnamePair = splitAbsPath(QString(e->path()));\n    query->prepare(query->insertIgnorePreamble() + \" into pathtable (path)\"\n                   \"values (?)\");\n    query->addBindValue(pathFnamePair.first);\n    query->exec();\n\n    InsertIfNotExist insIfnExist(*query, \"readFile\");\n    insIfnExist.addSimple(\"envId\", envId);\n    insIfnExist.addSimple(\"name\", pathFnamePair.second);\n    insIfnExist.addEntry(\"pathId\", {pathFnamePair.first},\n                         \"(select id from pathtable where path=?)\");\n\n    insIfnExist.addSimple(\"mtime\",fromMtime(e->mtime()));\n    insIfnExist.addSimple(\"size\", qint64(e->size()));\n    insIfnExist.addSimple(\"mode\", qint64(e->mode()));\n    insIfnExist.addSimple(\"hash\", fromHashValue(e->hash()));\n    insIfnExist.addSimple(\"hashmetaId\", hashMetaId);\n    insIfnExist.addSimple(\"isStoredToDisk\", e->fileContentSize() > 0);\n\n    bool existed;\n    const auto readFileId = insIfnExist.exec(&existed);\n\n    if(! 
existed && e->fileContentSize() > 0){\n        copyToStoredFiles(e, storedFilesDir, readFileId.toByteArray());\n    }\n    query->prepare(query->insertIgnorePreamble() +\n                   \" into readFileCmd (cmdId, readFileId) values (?,?)\");\n    query->addBindValue(cmd.idInDb);\n    query->addBindValue(readFileId);\n    query->exec();\n\n}\n\n\n\n\n\n/// sql allows for cascade deleting orphans (children), here we kill\n/// parents, where all children died\nstatic void\ndeleteChildlessParents(const QueryPtr& query){\n    logDebug << \"delete from hashmeta...\";\n    query->exec(\"delete from hashmeta where not exists \"\n               \"(select 1 from cmd where cmd.hashmetaId=hashmeta.id)\");\n    logDebug << \"delete from session...\";\n    query->exec(\"delete from session where not exists \"\n               \"(select 1 from cmd where cmd.sessionId=session.id)\");\n\n    // delete stored read files (script files) in filesystem AND database\n    query->setForwardOnly(true);\n    query->prepare(\"select readFile.id from readFile where \"\n        \"readFile.isStoredToDisk=? and \"\n        \"not exists (select 1 from readFileCmd where readFileCmd.readFileId=readFile.id) \");\n    query->bindValue(0, true);\n    query->exec();\n    StoredFiles storedFiles;\n    logDebug << \"looping though read 'script' files to evtl. delete from filesystem...\";\n    while(query->next()){\n        const QString fname = query->value(0).toString();\n        if(! 
storedFiles.deleteReadFile(fname) ){\n            logWarning << qtr(\"failed to remove the file with name %1 \"\n                              \"from the read files dir.\").arg(fname);\n        }\n    }\n    logDebug << \"delete from readFile...\";\n    query->exec(\"delete from readFile where not exists \"\n               \"(select 1 from readFileCmd where readFileCmd.readFileId=readFile.id)\");\n\n    // Do it last -> foreign key in readFile\n    logDebug << \"delete from env...\";\n    query->exec(\"delete from env where not exists (select 1 from cmd where \"\n               \"cmd.envId=env.id)\");\n\n    query->exec(\"delete from pathtable where not exists \"\n                \"(select 1 from writtenFile where writtenFile.pathId=pathtable.id) \"\n                \"and not exists \"\n                \"(select 1 from readFile where readFile.pathId=pathtable.id)\");\n}\n\n\nstatic FileReadInfos\nqueryFileReadInfos(const SqlQuery& sqlQ, const QueryPtr& query_=nullptr, const QString& optionalJoins={}){\n    const QueryPtr query = (query_ != nullptr) ? 
query_ : db_connection::mkQuery();\n    FileReadInfos readInfos;\n    query->prepare(\"select readFile.id,readFile_path.path,name,mtime,size,\"\n                   \"mode,hash,isStoredToDisk from readFile \"\n                   \"join pathtable as readFile_path \"\n                   \"on readFile.pathId=readFile_path.id \"\n                   + optionalJoins + \" where \" + sqlQ.query());\n    query->addBindValues(sqlQ.values());\n    query->exec();\n    while(query->next()){\n        int i=0;\n        FileReadInfo fInfo;\n        fInfo.idInDb = qVariantTo_throw<qint64>(query->value(i++));\n        fInfo.path = query->value(i++).toString();\n        fInfo.name = query->value(i++).toString();\n        fInfo.mtime = query->value(i++).toDateTime();\n        fInfo.size =  qVariantTo_throw<qint64>(query->value(i++));\n        fInfo.mode =  qVariantTo_throw<mode_t>(query->value(i++));\n        fInfo.hash = db_conversions::toHashValue(query->value(i++));\n        fInfo.isStoredToDisk = query->value(i++).toBool();\n\n        readInfos.push_back(fInfo);\n    }\n    return readInfos;\n}\n\n\n/////////////////////// public ////////////////////////////////\n\n\n/// @return the new command id in database\n/// @throws QExcDatabase\nqint64 db_controller::addCommand(const CommandInfo &cmd)\n{\n    auto query = db_connection::mkQuery();\n    query->transaction();\n\n\n    query->prepare(query->insertIgnorePreamble() + \" into env (hostname, username) values (?,?)\");\n    query->addBindValue(cmd.hostname);\n    query->addBindValue(cmd.username);\n    query->exec();\n\n    query->prepare(\"select id from env where hostname=? and username=?\");\n    query->addBindValue(cmd.hostname);\n    query->addBindValue(cmd.username);\n    query->exec();\n    query->next(true);\n    const auto envId = qVariantTo_throw<qint64>(query->value(0));\n\n    if(! 
cmd.hashMeta.isNull()) {\n        query->prepare(query->insertIgnorePreamble() +\n                      \" into hashmeta (chunkSize, maxCountOfReads) values (?,?)\");\n        query->addBindValue(cmd.hashMeta.chunkSize);\n        query->addBindValue(cmd.hashMeta.maxCountOfReads);\n        query->exec();\n    }\n\n    if(! cmd.sessionInfo.uuid.isNull()) {\n        query->prepare(query->insertIgnorePreamble() +\n                      \" into session (id) values (?)\");\n        query->addBindValue(cmd.sessionInfo.uuid);\n        query->exec();\n    }\n\n    query->prepare(\"insert into cmd (txt,envId,hashmetaId,returnVal,\"\n                  \"startTime,endTime,workingDirectory,sessionId) \"\n                  \"values (?,?,\"\n                  \"(select id from hashmeta where chunkSize=? and maxCountOfReads=?),\"\n                  \"?,?,?,?,?)\"\n                  );\n    query->addBindValue(cmd.text);\n    query->addBindValue(envId);\n    query->addBindValue(cmd.hashMeta.chunkSize);\n    query->addBindValue(cmd.hashMeta.maxCountOfReads);\n    query->addBindValue(cmd.returnVal);\n    query->addBindValue(cmd.startTime);\n    query->addBindValue(cmd.endTime);\n    query->addBindValue(cmd.workingDirectory);\n    query->addBindValue(cmd.sessionInfo.uuid);\n    query->exec();\n\n    return qVariantTo_throw<qint64>(query->lastInsertId());\n}\n\n\n\n/// update only relevant command fields, which are those that are not\n/// known from the beginning.\nvoid db_controller::updateCommand(const CommandInfo &cmd)\n{\n    assert(cmd.idInDb != db::INVALID_INT_ID);\n    auto query = db_connection::mkQuery();\n\n    query->prepare(\"update cmd set txt=?,returnVal=?,startTime=?,endTime=? 
\"\n                   \"where `id`=?\");\n    query->addBindValue(cmd.text);\n    query->addBindValue(cmd.returnVal);\n    query->addBindValue(cmd.startTime);\n    query->addBindValue(cmd.endTime);\n    query->addBindValue(cmd.idInDb);\n\n    query->exec();\n\n}\n\n\n/// Add file events belonging to param cmd which must belong to a valid\n/// database entry (idInDb must valid)\nvoid db_controller::addFileEvents(const CommandInfo &cmd, FileEvents &fileEvents)\n{\n    assert(cmd.idInDb != db::INVALID_INT_ID);\n    assert(ftell(fileEvents.file()) == 0);\n\n    auto query = db_connection::mkQuery();\n    query->transaction();\n\n    query->prepare(\"select envId,hashmetaId from cmd where `id`=?\");\n    query->addBindValue(cmd.idInDb);\n    query->exec();\n    query->next(true);\n    const QVariant envId = query->value(0);\n    const QVariant hashMetaId = query->value(1);\n\n    FileEvent* e;\n    uint counter = 0;\n    InterruptProtect ip(SIGTERM);\n    while ((e = fileEvents.read()) != nullptr) {\n        if(FileEvents::isReadEvent(e->flags())){\n            insertFileReadEvent(query, cmd, envId, hashMetaId, e);\n        }\n        if(FileEvents::isWriteEvent(e->flags())){\n            insertFileWriteEvent(query, cmd, e);\n        }\n        // Be 'nice' to others and sleep a bit every now and then\n        if(++counter % 500 == 0 &&\n           ! 
ip.signalOccurred()){ // if we shall terminate don't sleep.\n            query->commit();\n            usleep(10 * 1000);\n            query->transaction();\n        }\n    }\n}\n\n\n/// Deletes the command and corresponding file events (read and write).\n/// @param sqlQuery: may only refer to columns of the 'cmd'-table.\n/// @returns numRowsAffected\nint db_controller::deleteCommand(const SqlQuery &sqlQuery)\n{\n    auto query = db_connection::mkQuery();\n    query->transaction();\n\n    logDebug << \"deleting cmd\" << sqlQuery.query();\n    query->prepare(\"delete from cmd where \" + sqlQuery.query());\n    query->addBindValues(sqlQuery.values());\n\n    query->exec();\n    int numRowsAffected = query->numRowsAffected();\n    // the respective triggers have also caused the deletion of orphans in\n    // writtenFile, readFileCmd, etc., however, we still need to handle childless parents:\n    deleteChildlessParents(query);\n    return numRowsAffected;\n}\n\n\n/// @param reverseResultIter: if true, the returned Iterator will traverse the resultset in\n/// reverse order on continous 'next'-calls.\nstd::unique_ptr<CommandQueryIterator>\ndb_controller::queryForCmd(const SqlQuery &sqlQ, bool reverseResultIter){\n    auto pQuery = db_connection::mkQuery();\n    std::unique_ptr<CommandQueryIterator> cmdIter(\n                new CommandQueryIterator(pQuery, reverseResultIter));\n\n    const QString queryStr =\n            \"select cmd.id,cmd.txt,\"\n            \"cmd.returnVal,cmd.startTime,cmd.endTime,cmd.workingDirectory,\"\n            \"session.id,session.comment,\"\n            \"hashmeta.chunkSize,hashmeta.maxCountOfReads,\"\n            \"env.username,env.hostname \"\n            \"from cmd \"             +\n            QString((sqlQ.containsTablename(\"writtenFile\") ||\n                     sqlQ.containsTablename(\"writtenFile_path\")) ? 
// an alias\n                        \"join writtenFile on cmd.id=writtenFile.cmdId \"\n                        \"join pathtable as writtenFile_path \"\n                        \"on writtenFile.pathId=writtenFile_path.id \" : \"\") +\n            QString((sqlQ.containsTablename(\"readFile\") ||\n                     sqlQ.containsTablename(\"readFile_path\")) ?\n                        \"join readFileCmd on cmd.id=readFileCmd.cmdId \"\n                        \"join readFile on readFileCmd.readFileId=readFile.id \"\n                        \"join pathtable as readFile_path \"\n                        \"on readFile.pathId=readFile_path.id \" :\n                        \"\") +\n            \"join env on cmd.envId=env.id \"\n            \"left join hashmeta on hashmeta.id=cmd.hashmetaId \" // left joins last, if possible!\n            \"left join `session` on cmd.sessionId=session.id \"\n            \"where \";\n\n    // do not change this -> order matters in html-plot...\n    const QString orderBy = \"order by cmd.startTime \" + sqlQ.ascendingStr() +\n                            sqlQ.mkLimitString();\n\n    // we need the size (at other places) but QSQLITE does not support QSqlQuery::size.\n    // To use a workaround, forward mode must not be enabled.\n    // See also https://stackoverflow.com/a/26500811/7015849\n    // if( ! 
reverseResultIter){\n    //     pQuery->setForwardOnly(true);\n    // }\n    const QString fullQuery = queryStr + sqlQ.query() + \" group by cmd.id \" + orderBy;\n    pQuery->prepare(fullQuery);\n    pQuery->addBindValues(sqlQ.values());\n    logDebug << \"executing\" << fullQuery;\n    pQuery->exec();\n\n    if(reverseResultIter){\n        // place cursor right after the last record, so a call to \"previous\" points to last.\n        pQuery->last();\n        pQuery->next();\n    }\n    return cmdIter;\n}\n\n/// if no entry can be found, the id of the returned file info is invalid.\nFileReadInfo db_controller::queryReadInfo_byId(const qint64 id, const QueryPtr& query_)\n{\n    SqlQuery sqlQ;\n    sqlQ.addWithAnd(\"readFile.id\", id);\n    auto fileReadInfos = queryFileReadInfos(sqlQ, query_);\n    if(fileReadInfos.isEmpty()){\n        return FileReadInfo();\n    }\n    assert(fileReadInfos.size() == 1);\n    return fileReadInfos.first();\n}\n\nFileReadInfos db_controller::queryReadInfos_byCmdId(qint64 cmdId, const QueryPtr &query_)\n{\n    SqlQuery sqlQ;\n    sqlQ.addWithAnd(\"cmdId\", cmdId);\n    return queryFileReadInfos(sqlQ, query_,\n                              \" join readFileCmd on \"\n                              \"readFile.id=readFileCmd.readFileId \");\n}\n\n\n/// @param restrictingFilesize: only return hashmeta-entries for which at least one file\n/// exists which was recorded using a given hashmeta and whose size is exactly that.\n/// Set to -1 to return all HashMeta entries.\n/// @param isReadFile: if true, consider read files, else written. 
Only used, if\n/// restrictingFilesize!=-1\ndb_controller::HashMetas\ndb_controller::queryHashmetas(qint64 restrictingFilesize, bool isReadFile){\n    const QString FIELDS = \" chunkSize,maxCountOfReads,hashmeta.id \";\n\n    QString sql;\n    if(restrictingFilesize == -1){\n        sql = \"select \"+FIELDS+\" from `hashmeta`\";\n    } else {\n        sql = (isReadFile) ?\n              \"select \"+FIELDS+\" from `readFile` \"\n              \"left join hashmeta on readFile.hashmetaId=hashmeta.id \"\n              \"where readFile.size=? \"\n              \"group by chunkSize,maxCountOfReads \"\n            :\n              \"select \"+FIELDS+\" from cmd \"\n              \"join `writtenFile` on cmd.id=writtenFile.cmdId \"\n              \"left join hashmeta on cmd.hashmetaId=hashmeta.id \"\n              \"where writtenFile.size=? \"\n              \"group by chunkSize,maxCountOfReads \";\n    }\n    auto query = db_connection::mkQuery();\n    query->prepare(sql);\n    query->addBindValue(restrictingFilesize);\n    query->exec();\n    db_controller::HashMetas hashMetas;\n    bool noHashAdded = false;\n    while(query->next()){\n        if( query->value(0).isNull()){\n            if( ! noHashAdded){\n                hashMetas.push_back(HashMeta());\n                noHashAdded = true;\n            }\n        } else {\n            HashMeta h;\n            qVariantTo_throw(query->value(0), &h.chunkSize);\n            qVariantTo_throw(query->value(1), &h.maxCountOfReads);\n            qVariantTo_throw(query->value(2), &h.idInDb);\n            hashMetas.push_back(h);\n        }\n    }\n    return hashMetas;\n}\n\n/// Find the database id of a given hasmeta entry (by chunkSize and maxCountOfReads)\nqint64\ndb_controller::queryHashmetaId(const HashMeta &hashMeta )\n{\n    qint64 idIndDb = db::INVALID_INT_ID;\n    auto query = db_connection::mkQuery();\n    query->prepare(\"select `id` from hashmeta where \"\n                   \"chunkSize=? 
and maxCountOfReads=?\");\n    query->addBindValue(hashMeta.chunkSize);\n    query->addBindValue(hashMeta.maxCountOfReads);\n    query->exec();\n    if(query->next()){\n        qVariantTo_throw(query->value(0), &idIndDb);\n    }\n    return idIndDb;\n}\n\n"
  },
  {
    "path": "src/common/database/db_controller.h",
    "content": "#pragma once\n\n#include <QByteArray>\n#include <QVector>\n#include <memory>\n\n#include \"fileevents.h\"\n#include \"commandinfo.h\"\n#include \"sqlquery.h\"\n#include \"db_connection.h\"\n#include \"qsqlquerythrow.h\"\n#include \"command_query_iterator.h\"\n\n\nnamespace db_controller {\n\ntypedef QVector<HashMeta> HashMetas;\n\nqint64 addCommand(const CommandInfo &cmd);\nvoid updateCommand(const CommandInfo &cmd);\n\nvoid addFileEvents(const CommandInfo &cmd, FileEvents& fileEvents);\n\nint deleteCommand(const SqlQuery &query);\n\nstd::unique_ptr<CommandQueryIterator> queryForCmd(const SqlQuery& sqlQ, bool reverseResultIter=false);\n\nFileReadInfo queryReadInfo_byId(qint64 id, const QueryPtr& query_=nullptr);\nFileReadInfos queryReadInfos_byCmdId(qint64 cmdId, const QueryPtr& query_=nullptr);\n\nHashMetas queryHashmetas(qint64 restrictingFilesize=-1, bool isReadFile=false);\nqint64 queryHashmetaId(const HashMeta&);\n\n}\n\n\n\n\n\n\n\n"
  },
  {
    "path": "src/common/database/db_conversions.cpp",
    "content": "\n#include <QDateTime>\n#include \"db_conversions.h\"\n#include \"util.h\"\n\nQVariant db_conversions::fromMtime(time_t mtime)\n{\n    return QVariant(QDateTime::fromTime_t(static_cast<uint>(mtime)));\n}\n\n/// sqlite cannot store uint64 as int - store as blob instead.\nQVariant db_conversions::fromHashValue(const HashValue &val)\n{\n    QByteArray hashBytes(val.isNull() ? \"\" : qBytesFromVar(val.value()));\n    return  { hashBytes };\n}\n\nHashValue db_conversions::toHashValue(const QVariant &var)\n{\n    QByteArray hashBytes = var.toByteArray();\n    if(hashBytes.isEmpty()){\n        return {};\n    }\n    return varFromQBytes<HashValue::value_type>(hashBytes);\n}\n"
  },
  {
    "path": "src/common/database/db_conversions.h",
    "content": "#pragma once\n\n#include <QVariant>\n#include <ctime>\n\n#include \"nullable_value.h\"\n\nnamespace db_conversions {\n    QVariant fromMtime(time_t mtime);\n    // Not toMtime, because we work with QDateTime afterwards\n\n    QVariant fromHashValue(const HashValue& val);\n    HashValue toHashValue(const QVariant& var);\n\n\n}\n\n\n"
  },
  {
    "path": "src/common/database/db_globals.cpp",
    "content": "#include \"db_globals.h\"\n\n// SQLITE begins int-ids at one.\nconst qint64 db::INVALID_INT_ID = 0;\n"
  },
  {
    "path": "src/common/database/db_globals.h",
    "content": "#pragma once\n\n#include <QtGlobal>\n\nnamespace db {\n\nextern const qint64 INVALID_INT_ID;\n\n}\n"
  },
  {
    "path": "src/common/database/file_query_helper.cpp",
    "content": "\n#include <QDateTime>\n#include <QDebug>\n#include <vector>\n\n#include \"db_controller.h\"\n#include \"db_conversions.h\"\n#include \"db_globals.h\"\n#include \"exccommon.h\"\n#include \"file_query_helper.h\"\n#include \"hashcontrol.h\"\n#include \"logger.h\"\n#include \"os.h\"\n#include \"qfilethrow.h\"\n#include \"query_columns.h\"\n#include \"settings.h\"\n#include \"translation.h\"\n\nusing std::vector;\nusing db_controller::QueryColumns;\nusing namespace db_conversions;\n\nstruct FileQueryColumns {\n    FileQueryColumns(bool readFile) : readFile(readFile){ // readFile, else writtenFile\n        QueryColumns& c = QueryColumns::instance();\n        if(readFile){\n            col_hash  = c.rFile_hash;\n            col_size  = c.rFile_size;\n            col_mtime = c.rFile_mtime;\n            col_hashMetaId = c.rFile_hashmetaId;\n        } else {\n            col_hash  = c.wFile_hash;\n            col_size  = c.wFile_size;\n            col_mtime = c.wFile_mtime;\n            col_hashMetaId = c.cmd_hashmetaId;\n        }\n    }\n\n    QString col_hash;\n    QString col_size;\n    QString col_mtime;\n    QString col_hashMetaId;\n    bool readFile;\n};\n\n/// Strictly speaking a given hash for a file is\n/// only valid in combination with it's hash-settings as those determine what\n/// parts of a file to hash.\nstruct HashMetaValuePair {\n    HashMeta meta;\n    HashValue value;\n};\n\n\nstatic void\naddToHashQuery(SqlQuery& query, const HashValue& hashVal,\n               const QVariant& hashMetaId, const FileQueryColumns& c){\n    // Hash-values and hashmeta are paired, so e.g.\n    // (hashval=123 and hashMetaId=1) or (hashval=35 and hashMetaId=2).\n    // If we query for a command, where hashing was disabled, the hash\n    // naturally has be null as well.\n    if(hashMetaId.isNull() && ! hashVal.isNull()){\n        throw QExcProgramming(\"hashMetaId.isNull && ! 
hashVal.isNull\");\n    }\n    SqlQuery subquery;\n    subquery.addWithAnd(c.col_hash, fromHashValue(hashVal));\n    subquery.addWithAnd(c.col_hashMetaId, hashMetaId);\n    query.addWithOr(subquery);\n}\n\nstatic void\naddToHashQuery(SqlQuery& query, const HashValue& hashVal,\n               const HashMeta& hashMeta, const FileQueryColumns& c){\n    QVariant hashMetaId;\n    if(! hashMeta.isNull()){\n        hashMetaId = hashMeta.idInDb;\n    }\n    addToHashQuery(query, hashVal, hashMetaId, c);\n}\n\nstatic void\naddToHashQuery(SqlQuery& query, const HashMetaValuePair& p,\n               const FileQueryColumns& c){\n    addToHashQuery(query, p.value, p.meta, c);\n}\n\nstatic void\naddToHashQuery(SqlQuery& query,\n               const vector<HashMetaValuePair>& hashMetaValuePairs,\n               const FileQueryColumns& c){\n    for(const auto& p : hashMetaValuePairs){\n         addToHashQuery(query, p, c);\n    }\n}\n\n\n\n\n/// Generate all necessary HashMeta-HashValue-pairs for the given fd,\n/// ignoring a possibly existing knownPair.\nstatic vector<HashMetaValuePair>\ngenerateHashMetaValuePairs(QFileThrow& file, qint64 filesize,\n                           const FileQueryColumns* c=nullptr,\n                           const HashMetaValuePair* knownPair=nullptr){\n    if(filesize == 0){\n        return {};\n    }\n    const auto hashMetas = (c==nullptr) ?\n                db_controller::queryHashmetas() :\n                db_controller::queryHashmetas(filesize, c->readFile);\n\n    HashControl hashCtrl;\n    vector<HashMetaValuePair> pairs;\n    for(const auto& hashMeta : hashMetas){\n        HashValue hashVal;\n        if(! 
hashMeta.isNull()){\n            if(knownPair != nullptr && hashMeta.idInDb == knownPair->meta.idInDb){\n                // already got this one.\n                continue;\n            }\n            hashVal = hashCtrl.genPartlyHash(file.handle(), filesize, hashMeta);\n            if(hashVal.isNull()){\n                throw QExcIo(qtr(\"file %1 - failed to hash, although it \"\n                                  \"was not empty.\").arg(file.fileName()));\n            }\n        }\n        pairs.push_back({hashMeta, hashVal});\n    }\n    return pairs;\n}\n\nstatic bool\nentriesExists(const SqlQuery& query){\n    return db_controller::queryForCmd(query)->next();\n}\n\n\n\n/// Typically, changing hash-settings is a rare event,\n/// so optimistically generate a hash using current settings (if\n/// enabled)\nstatic HashMetaValuePair\ngoodLuckHashAttempt(QFileThrow& file, const qint64 size){\n    if(size == 0){\n        return {};\n    }\n    const auto &sets = Settings::instance();\n    auto hashMeta = sets.hashSettings().hashMeta;\n    HashValue hashVal;\n    HashControl hashCtrl;\n    if(! sets.hashSettings().hashEnable || hashMeta.isNull()){\n        return {};\n    }\n    hashMeta.idInDb = db_controller::queryHashmetaId(hashMeta);\n    if(hashMeta.idInDb == db::INVALID_INT_ID){\n        logDebug << \"unusual event: hashmeta settings not found in db\";\n        return {};\n    }\n\n    hashVal = hashCtrl.genPartlyHash(file.handle(), size, hashMeta);\n    if(hashVal.isNull()){\n        // no need to print a warning here - is caught in\n        // generateHashMetaValuePairs\n        return {};\n    }\n    HashMetaValuePair p;\n    p.meta = hashMeta;\n    p.value = hashVal;\n    return p;\n}\n\n\n/// Build a database-query based on the following file-attributes, which are collected\n/// automatically: size, hash and mtime. 
The query attempts to be \"smart\", by\n/// preferably returning more likely matches, lowering the strictness if nothing\n/// is found.\n/// It is looked preliminarily, if a file exactly matching the specs and using *only*\n/// the current hash-settings exists. If nothing is found, other hash-settings (if any)\n/// are used to calculate the other hashes.\n/// If no entry was found, the query is set to ignore the mtime.\n/// Empty files (size==0) are a special case: their hash is always null, so we can\n/// ommit the hashing altogether. Over the time quite a number of empty files\n/// may exist, so in this case we do **not** ignore the mtime and return 100k results.\n/// In legacy shournal versions, for written files an mtime\n/// not later than the file's current\n/// mtime was set in the assumption that changing the mtime afterwards should\n/// only increase it. However, for example wget uses the\n/// «Last-Modified header» for HTTP if available,\n/// which is naturally older than the system's mtime of the just downloaded\n/// file.\n/// @param filename: existing file, where attributes are collected from\n/// @param readFile: if true, query for read files, else for written files\nSqlQuery\nfile_query_helper::buildFileQuerySmart(const QString &filename, bool readFile)\n{\n    FileQueryColumns c(readFile);\n    QFileThrow file(filename);\n    file.open(QFile::OpenModeFlag::ReadOnly);\n    auto st_ = os::fstat(file.handle());\n    const QVariant mtimeVar = fromMtime(st_.st_mtime);\n    const qint64 size = st_.st_size;\n\n    if(size == 0){\n        SqlQuery query;\n        query.addWithAnd(c.col_size, size);\n        query.addWithAnd(c.col_mtime, mtimeVar);\n        return query;\n    }\n\n    vector<HashMetaValuePair> hashMetaValuePairs;\n    auto firstHashRes = goodLuckHashAttempt(file, size);\n    if(! 
firstHashRes.meta.isNull()){\n        SqlQuery query;\n        addToHashQuery(query, firstHashRes, c);\n        query.addWithAnd(c.col_size, size);\n        query.addWithAnd(c.col_mtime, mtimeVar);\n        if(entriesExists(query)){\n            return query;\n        }\n    }\n    // Our goodluck first attempt failed (bad size, hash or mtime) - now query based\n    // on all other hashMetaValuePairs\n    hashMetaValuePairs = generateHashMetaValuePairs(file, size, &c, &firstHashRes);\n    if(firstHashRes.meta.isNull() && hashMetaValuePairs.empty()){\n        logDebug << filename << \"no file with matching size exists\";\n        return mkInertSqlQuery();\n    }\n\n    if(! hashMetaValuePairs.empty()){\n        SqlQuery query;\n        addToHashQuery(query, hashMetaValuePairs, c);\n        query.addWithAnd(c.col_size, size);\n        query.addWithAnd(c.col_mtime, mtimeVar);\n        if(entriesExists(query)){\n            return query;\n        }\n    }\n    // We failed to find a match with exact mtime, so, finally, perform the\n    // query ignoring the mtime. Note that our first hash-result (if any)\n    // is not part of hashMetaValuePairs yet.\n    if(! firstHashRes.meta.isNull()){\n        hashMetaValuePairs.push_back(firstHashRes);\n    }\n    logDebug << \"will perform query on\" << filename << \"ignoring mtime\";\n    SqlQuery query;\n    addToHashQuery(query, hashMetaValuePairs, c);\n    query.addWithAnd(c.col_size, size);\n    return query;\n}\n\n\n/// Build a database-query based on the following file-attributes, which are collected\n/// automatically: size, hash or mtime. 
Any combination of those can be used.\n/// @param filename: existing file, where attributes are collected from\n/// @param readFile: if true, query for read files, else for written file\nSqlQuery file_query_helper::buildFileQuery(const QString &filename,\n                                   bool readFile,\n                                   bool use_mtime, bool use_hash, bool use_size)\n{\n    FileQueryColumns c(readFile);\n    SqlQuery query;\n\n    QFileThrow file(filename);\n    file.open(QFile::OpenModeFlag::ReadOnly);\n    const auto st_ = os::fstat(file.handle());\n    const QVariant mtimeVar = fromMtime(st_.st_mtime);\n    const qint64 size = st_.st_size;\n\n    if(use_mtime) query.addWithAnd(c.col_mtime, mtimeVar);\n    if(use_hash){\n        if(size == 0){\n            if(! use_mtime && ! use_size){\n                logWarning << qtr(\"File %1 is empty, so hash-only queries are \"\n                                  \"not possible.\").arg(filename);\n                return mkInertSqlQuery();\n            }\n        } else {\n            auto hashMetaValuePairs = generateHashMetaValuePairs(\n                        file, use_size,\n                        (use_size)? &c : nullptr);\n            SqlQuery hashQuery;\n            addToHashQuery(hashQuery, hashMetaValuePairs, c);\n            query.addWithAnd(hashQuery);\n        }\n    }\n    if(use_size) query.addWithAnd(c.col_size, qint64(size));\n    return query;\n}\n"
  },
  {
    "path": "src/common/database/file_query_helper.h",
    "content": "#pragma once\n\n#include <QFile>\n\n#include \"sqlquery.h\"\n#include \"nullable_value.h\"\n#include \"fileinfos.h\"\n\nnamespace file_query_helper {\n    SqlQuery buildFileQuerySmart(const QString& filename, bool readFile);\n    SqlQuery buildFileQuery(const QString& filename, bool readFile,\n                              bool use_mtime, bool use_hash, bool use_size);\n}\n\n\n"
  },
  {
    "path": "src/common/database/fileinfos.cpp",
    "content": "#include \"fileinfos.h\"\n\n#include \"db_conversions.h\"\n#include \"commandinfo.h\"\n#include \"hashcontrol.h\"\n#include \"logger.h\"\n#include \"qfilethrow.h\"\n#include \"util.h\"\n\nFileInfo::~FileInfo() {};\n\nQString FileInfo::currentStatus(const CommandInfo &cmd) const\n{\n    auto filename = pathJoinFilename(this->path, this->name);\n    try{\n        QFileThrow f(filename);\n        if(!f.exists()){\n            return \"N\";\n        }\n        HashControl hashCtrl;\n        f.open(QFile::OpenModeFlag::ReadOnly);\n        const auto st_ = os::fstat(f.handle());\n        if(size != st_.st_size ||\n           QDateTime::fromTime_t(static_cast<uint>(st_.st_mtime))!= mtime ||\n           hash!= hashCtrl.genPartlyHash(f.handle(), st_.st_size, cmd.hashMeta, false)){\n            return \"M\";\n        }\n        return \"U\";\n    } catch (const std::exception& ex) {\n        logWarning << qtr(\"Failed to determine status of file %1 - %2\").arg(filename)\n                      .arg(QString(ex.what()));\n        return \"ERROR\";\n    }\n}\n\n\nvoid FileWriteInfo::write(QJsonObject &json) const\n{\n    json[\"id\"] = idInDb;\n    json[\"path\"] = pathJoinFilename(path, name);\n    json[\"size\"] = size;\n    json[\"mtime\"] = QJsonValue::fromVariant(mtime);\n    json[\"hash\"] = QJsonValue::fromVariant(QVariant::fromValue(hash));\n}\n\nbool\nFileWriteInfo::operator==(const FileInfo &rhs) const\n{\n    if(idInDb != db::INVALID_INT_ID && rhs.idInDb != db::INVALID_INT_ID){\n        return idInDb == rhs.idInDb;\n    }\n    return mtime == rhs.mtime &&\n            size == rhs.size &&\n            path == rhs.path &&\n            name == rhs.name &&\n            hash == rhs.hash;\n}\n\n////////////////////////////////////////////////////////////\n\nvoid FileReadInfo::write(QJsonObject &json) const\n{\n    json[\"id\"] = idInDb;\n    json[\"path\"] = pathJoinFilename(path, name);\n    json[\"size\"] = size;\n    json[\"mtime\"] = 
QJsonValue::fromVariant(mtime);\n    // Note: in case of a non-null hash, this results in a quoted string.\n    // While useful in the html-export (javascript INT-limit..), technically this is not totally\n    // correct. However, it has always been so, so do not change.\n    json[\"hash\"] = QJsonValue::fromVariant(QVariant::fromValue(hash));\n    json[\"isStoredToDisk\"] = isStoredToDisk;\n}\n\nbool\nFileReadInfo::operator==(const FileReadInfo &rhs) const\n{\n    if(idInDb != db::INVALID_INT_ID && rhs.idInDb != db::INVALID_INT_ID){\n        return idInDb == rhs.idInDb;\n    }\n\n    return mtime == rhs.mtime &&\n            size == rhs.size &&\n            path == rhs.path &&\n            name == rhs.name &&\n            mode == rhs.mode &&\n            hash == rhs.hash;\n}\n\nbool FileReadInfo::operator==(const FileInfo&) const\n{\n    throw QExcProgramming(\"Unimplemented FileReadInfo::operator==(const FileInfo &rhs)\");\n}\n"
  },
  {
    "path": "src/common/database/fileinfos.h",
    "content": "#pragma once\n\n#include <QString>\n#include <QDateTime>\n#include <QJsonObject>\n\n#include \"nullable_value.h\"\n#include \"db_globals.h\"\n\nstruct CommandInfo;\n\nstruct FileInfo {\n    virtual ~FileInfo() = 0;\n\n    qint64 idInDb { db::INVALID_INT_ID };\n\n    QDateTime mtime;\n    qint64    size {};\n    QString   path;\n    QString   name;\n    HashValue  hash;\n\n    virtual QString currentStatus(const CommandInfo &cmd) const;\n    virtual void write(QJsonObject &json) const = 0;\n    virtual bool operator==(const FileInfo& rhs) const = 0 ;\n};\n\nstruct FileWriteInfo : public FileInfo\n{\n\n    virtual void write(QJsonObject &json) const;\n    virtual bool operator==(const FileInfo& rhs) const;\n\n};\n\n\nstruct FileReadInfo : public FileInfo\n{\n    mode_t mode {};\n    bool isStoredToDisk {false};\n\n    virtual void write(QJsonObject &json) const;\n\n    virtual bool operator==(const FileReadInfo& rhs) const;\n    virtual bool operator==(const FileInfo& rhs) const;\n};\n"
  },
  {
    "path": "src/common/database/insertifnotexist.cpp",
    "content": "\n\n#include \"insertifnotexist.h\"\n\n#include \"qsqlquerythrow.h\"\n\ndb_controller::InsertIfNotExist::InsertIfNotExist(QSqlQueryThrow &parentQuery,\n                                                  const QString &tablename) :\n    m_query(parentQuery),\n    m_tablename(tablename)\n{}\n\n\nvoid db_controller::InsertIfNotExist::addSimple(const QString &colname, const QVariant &value)\n{\n    InsertIfNotExist::addEntry(colname, {value}, \"?\");\n}\n\n/// Add a columnname-value pair for the prospective selcet/insert queuey.\n/// @param colname: Column-name.\n/// @param values: value-list for QSqlQuery::addBindValue. In most cases\n/// only one value is supplied, however, multiple values are possible to\n/// allow for sub-queries.\n/// @param placeholder: In simple cases the placeholder is ?, e.g.\n/// «colname is ?», however, for sub-queries this may expand to e.g.\n/// «(select id from table2 where foo is ? and bar is ?)».\nvoid db_controller::InsertIfNotExist::\naddEntry(const QString &colname, const QVariantList &values,\n         const QString &placeholder)\n{\n    InsertIfNotExistEntry e;\n    e.colname = colname;\n    e.placeholder = placeholder;\n    m_entries.push_back(e);\n    for(const auto& val : values){\n        m_values.push_back(val);\n    }\n}\n\n/// Execute the insert-if-not exist query using the\n/// previously added column-value-pairs.\n/// @param existed: If non-null, set it to true, if\n/// the entry already existed (so no insert was necessary).\n/// @return: the existing or newly created id\nQVariant db_controller::InsertIfNotExist::exec(bool *existed)\n{\n    QString query = \"select id from \" + m_tablename + \" where \";\n    bool first = true;\n    for(const auto& entry : m_entries){\n        if(! 
first){\n            query += \" and \";\n        }\n        first = false;\n        query += entry.colname + \" is \" + entry.placeholder;\n    }\n    m_query.prepare(query);\n    m_query.addBindValues(m_values);\n    m_query.exec();\n\n    bool nextSuccess = m_query.next();\n\n    if(existed != nullptr){\n        *existed = nextSuccess;\n    }\n    if(nextSuccess){\n        return m_query.value(0);\n    }\n\n    // record did not exist, insert it\n    query = \"insert into \" + m_tablename + \" (\";\n    first = true;\n    QString placeholders('(');\n    for(const auto& entry : m_entries){\n        if(! first){\n            query += ',';\n            placeholders += ',';\n        }\n        first = false;\n        query += entry.colname;\n        placeholders += entry.placeholder;\n    }\n    query += ')';\n    placeholders += ')';\n\n    query += \" values \" + placeholders;\n\n    m_query.prepare(query);\n    m_query.addBindValues(m_values);\n    m_query.exec();\n    return m_query.lastInsertId();\n}\n"
  },
  {
    "path": "src/common/database/insertifnotexist.h",
    "content": "\n#pragma once\n\n#include <QVariant>\n#include <QVector>\n\n\nclass QSqlQueryThrow;\n\nnamespace db_controller {\n\n/// Insert values in a sql table, if these values do not already exist.\n/// Requires an existing column named `id`.\n/// If the value-combination does not exist, the insert-operation inserts\n/// these values.\nclass InsertIfNotExist {\npublic:\n\n    InsertIfNotExist(QSqlQueryThrow& parentQuery,\n                     const QString& tablename);\n\n    void addSimple(const QString& colname, const QVariant& value);\n    void addEntry(const QString& colname, const QVariantList& values,\n                  const QString& placeholder);\n\n    QVariant exec(bool* existed=nullptr);\n\nprivate:\n    struct InsertIfNotExistEntry {\n         QString colname;\n         QString placeholder;\n    };\n\n    QSqlQueryThrow& m_query;\n    QString m_tablename;\n    QVector<InsertIfNotExistEntry> m_entries;\n    QVariantList m_values;\n};\n\n\n}\n"
  },
  {
    "path": "src/common/database/qexcdatabase.cpp",
    "content": "\n#include \"qexcdatabase.h\"\n\n\n\n\nQExcDatabase::QExcDatabase(const QString &preamble, const QSqlError &err) :\n    QExcCommon (preamble)\n{\n    if(! descrip().isEmpty()){\n        setDescrip( descrip() + \": \");\n    }\n    setDescrip( descrip() + err.text()\n                + '('+ err.nativeErrorCode() + ')');\n}\n\nQExcDatabase::QExcDatabase(const QString &preamble) :\n    QExcCommon (preamble)\n{\n\n}\n"
  },
  {
    "path": "src/common/database/qexcdatabase.h",
    "content": "\n#include <QSqlError>\n\n#include \"exccommon.h\"\n\n\nclass QExcDatabase : public QExcCommon\n{\npublic:\n     QExcDatabase(const QString & preamble,\n                 const QSqlError & err);\n     QExcDatabase(const QString & preamble);\n\n};\n"
  },
  {
    "path": "src/common/database/qsqlquerythrow.cpp",
    "content": "#include <QMap>\n#include <QVariant>\n#include <QHash>\n#include <cassert>\n\n#include \"logger.h\"\n#include \"osutil.h\"\n#include \"qsqlquerythrow.h\"\n\n#include \"qexcdatabase.h\"\n#include \"util.h\"\n\nenum SQLITE_ERR { SQLITE_ERR_BUSY= 5 };\n\n//\n// QSqlQueryThrow::QSqlQueryThrow(QSqlResult *r)\n//     :QSqlQuery (r),\n//       m_execWasCalled(false)\n// {}\n//\n// QSqlQueryThrow::QSqlQueryThrow(const QString &query, const QSqlDatabase& db)\n//     :QSqlQuery(query, db),\n//       m_execWasCalled(false)\n// {}\n//\n\nstatic QString mkInsertIgnorePreamble(const QString& driverName)\n{\n    static const QHash<QString, QString> preambles {\n        {\"QSQLITE\", \"insert or ignore\"}\n    };\n    auto it = preambles.find(driverName);\n    if(it != preambles.end()){\n        return it.value();\n    }\n    return \"insert ignore\";\n}\n\nstatic int sqlerrToNumber(const QSqlError & err){\n    try {\n        return qVariantTo_throw<int>(err.nativeErrorCode());\n    } catch (ExcQVariantConvert& ex) {\n        ex.setDescrip(\"Failed to convert Sqlerror to number - \" +\n                      ex.descrip());\n        throw;\n    }\n}\n\nQSqlQueryThrow::QSqlQueryThrow(const QSqlDatabase& db)\n    : QSqlQuery (db),\n      m_insertIgnorePreamble(mkInsertIgnorePreamble(db.driverName())),\n      m_execWasCalled(false),\n      m_withinTransaction(false)\n{}\n\nQSqlQueryThrow::~QSqlQueryThrow()\n{\n    if(! m_withinTransaction){\n        return;\n    }\n    try {\n        if (std::uncaught_exception()) {\n            this->rollback();\n        } else {\n            this->commit();\n        }\n    } catch (const std::exception& e) {\n        fprintf(stderr, \"%s: %s\\n\", __func__, e.what());\n    }\n\n}\n\nvoid QSqlQueryThrow::exec()\n{\n    this->_doExec(QString());\n}\n\nvoid QSqlQueryThrow::exec(const QString &query)\n{\n    this->_doExec(query);\n}\n\nvoid QSqlQueryThrow::prepare(const QString &query)\n{\n    if(! 
QSqlQuery::prepare(query)){\n        throw QExcDatabase(qtr(\"prepare <%1> failed\").arg(query), this->lastError());\n    }\n    m_execWasCalled = false;\n}\n\nbool QSqlQueryThrow::next(bool throwIfEmpty)\n{\n    if(! m_execWasCalled){\n        throw QExcDatabase(QString(\"%1 was called without previous exec \")\n                           .arg(__func__));\n    }\n    bool ret = QSqlQuery::next();\n    if(throwIfEmpty && ! ret){\n         throw QExcDatabase(qtr(\"The query %1 was expected to have (another) result which is \"\n                                \"not the case\").arg(this->lastQuery()));\n    }\n    return ret;\n}\n\nvoid QSqlQueryThrow::addBindValues(const QVariantList &vals)\n{\n    for(const auto& val : vals){\n        this->addBindValue(val);\n    }\n}\n\n\n/// Note: while qt's QSqlDatabase starts transactions in SQLITE in 'deferred' mode,\n/// we rather choose 'immediate'. See also\n/// https://www.sqlite.org/lang_transaction.html\n/// and\n/// https://stackoverflow.com/a/1063768\n/// for the rationale.\nvoid QSqlQueryThrow::transaction()\n{\n    assert(! m_withinTransaction);\n    this->exec(\"BEGIN IMMEDIATE\");\n    m_withinTransaction = true;\n}\n\nvoid QSqlQueryThrow::commit()\n{\n    assert(m_withinTransaction);\n    m_withinTransaction = false;\n    this->exec(\"COMMIT\");\n}\n\nvoid QSqlQueryThrow::rollback()\n{\n    assert(m_withinTransaction);\n    m_withinTransaction = false;\n    this->exec(\"ROLLBACK\");\n}\n\n/// QSQLITE does not support size(), this is a workaround\n/// which only works if forwardOnly is false.\nint QSqlQueryThrow::computeSize()\n{\n    if(this->isForwardOnly()){\n        throw QExcDatabase(qtr(\"attempted to compute size although forwardOnly \"\n                               \"is enabled.\"));\n    }\n    // see also https://stackoverflow.com/a/26500811/7015849\n    const int initialPos = this->at();    \n    const int size = (this->last()) ? 
this->at() + 1 : 0;\n    // restore initial pos\n    switch (initialPos) {\n    case QSql::BeforeFirstRow:\n        this->first();\n        this->previous();\n        break;\n    case QSql::AfterLastRow:\n        this->last();\n        this->next();\n        break;\n    default:\n        this->seek(initialPos);\n        break;\n    }\n    return size;\n\n}\n\n\nQString QSqlQueryThrow::generateExcMsgExec(const QString &queryStr)\n{\n    QStringList vals;\n    for(const auto& entry : this->boundValues()){\n        vals.push_back(entry.value<QVariant>().toString());\n    }\n\n    QString valStr;\n    if(! vals.isEmpty()){\n        valStr = \" with values <\" + vals.join(\", \") + \">\";\n    }\n\n    QString msg = \"exec <\" + queryStr + \">\" + valStr + \" failed\";\n    return msg;\n}\n\nvoid QSqlQueryThrow::_doExec(const QString &query)\n{\n    for(int i=0; i<10; i++){\n        bool success = query.isEmpty() ? QSqlQuery::exec() : QSqlQuery::exec(query);\n        if(success){\n            m_execWasCalled = true;\n            return;\n        }\n        if(sqlerrToNumber(this->lastError()) == SQLITE_ERR_BUSY){\n            logInfo << \"Sqlquery failed with busy timeout. trying again in a \"\n                       \"few seconds:\" << (query.isEmpty()?this->lastQuery():query) ;\n            osutil::randomSleep(5 *1000, 20 *1000);\n        } else {\n            // throw immediatly (below)\n            break;\n        }\n    }\n    throw QExcDatabase(generateExcMsgExec(query.isEmpty()?this->lastQuery():query),\n                       this->lastError());\n}\n\nconst QString &QSqlQueryThrow::insertIgnorePreamble() const\n{\n    return m_insertIgnorePreamble;\n}\n"
  },
  {
    "path": "src/common/database/qsqlquerythrow.h",
    "content": "#pragma once\n\n#include <QSqlQuery>\n#include <QVariant>\n#include <QVector>\n\nclass QSqlQueryThrow : public QSqlQuery\n{\npublic:\n    // explicit QSqlQueryThrow(QSqlResult *r);\n    // explicit QSqlQueryThrow(const QString& query = QString(), const QSqlDatabase& db = QSqlDatabase());\n    explicit QSqlQueryThrow(const QSqlDatabase& db);\n    ~QSqlQueryThrow();\n    \n    void exec();\n    void exec(const QString& query);\n\n    void prepare(const QString& query);\n\n    bool next(bool throwIfEmpty=false);\n\n    void addBindValues(const QVariantList& vals);\n\n    void transaction();\n    void commit();\n    void rollback();\n\n    int computeSize();\n\npublic:\n    typedef QVector<QPair<const char*, QVariant> > ColnameValuePairs;\n\n    const QString& insertIgnorePreamble() const;\n\npublic:\n    // disable-copies: transactions cannot be copied...\n    QSqlQueryThrow(const QSqlQueryThrow &) = delete ;\n    void operator=(const QSqlQueryThrow &) = delete ;\n\n\nprivate:\n    QString generateExcMsgExec(const QString& queryStr);\n    void _doExec(const QString& query);\n    QString m_insertIgnorePreamble;\n    bool m_execWasCalled;\n    bool m_withinTransaction;\n};\n\n"
  },
  {
    "path": "src/common/database/query_columns.h",
    "content": "#pragma once\n\n#include <QString>\n#include \"util.h\"\n\nnamespace db_controller {\n\nclass QueryColumns {\npublic:\n    static QueryColumns& instance() {\n        static QueryColumns s_instance;\n        return s_instance;\n    }\n\n    const QString cmd_id {\"cmd.id\"};\n    const QString cmd_txt {\"cmd.txt\"};\n    const QString cmd_workingDir {\"cmd.workingDirectory\"};\n    const QString cmd_comment {\"cmd.comment\"};\n    const QString cmd_endtime {\"cmd.endTime\"};\n    const QString cmd_starttime {\"cmd.startTime\"};\n    const QString cmd_hashmetaId {\"cmd.hashmetaId\"};\n\n    const QString env_hostname {\"env.hostname\"};\n    const QString env_username {\"env.username\"};\n\n    const QString rFile_name {\"readFile.name\"};\n    const QString rFile_path {\"readFile_path.path\"}; // separate table, join alias\n    const QString rFile_mtime {\"readFile.mtime\"};\n    const QString rFile_size {\"readFile.size\"};\n    const QString rFile_hash  {\"readFile.hash\"};\n    const QString rFile_hashmetaId {\"readFile.hashmetaId\"};\n\n    const QString wFile_id    {\"writtenFile.id\"};\n    const QString wFile_name  {\"writtenFile.name\"};\n    const QString wFile_mtime {\"writtenFile.mtime\"};\n    const QString wFile_size  {\"writtenFile.size\"};\n    const QString wFile_hash  {\"writtenFile.hash\"};\n    const QString wFile_path  {\"writtenFile_path.path\"}; // separate table, join alias\n\n    const QString session_id {\"session.id\"};\n    const QString session_comment {\"session.comment\"};\n\nprivate:\n    QueryColumns() = default;\n\npublic:\n    ~QueryColumns() = default;\n    Q_DISABLE_COPY(QueryColumns)\n    DEFAULT_MOVE(QueryColumns)\n\n};\n\n}\n"
  },
  {
    "path": "src/common/database/sessioninfo.cpp",
    "content": "#include \"sessioninfo.h\"\n\n\n\nbool SessionInfo::operator==(const SessionInfo &rhs) const\n{\n    return uuid == rhs.uuid &&\n            comment == rhs.comment;\n}\n"
  },
  {
    "path": "src/common/database/sessioninfo.h",
    "content": "#pragma once\n\n#include <QString>\n\nstruct SessionInfo\n{\n    QByteArray uuid;\n    QString    comment;\n\n    bool operator==(const SessionInfo& rhs) const;\n};\n\n"
  },
  {
    "path": "src/common/database/sqlite_database_scheme.cpp",
    "content": "#include \"sqlite_database_scheme.h\"\n\n\n// Note: this is the initial scheme, please don't change it.\n// To add new stuff (tables/columns/indexes) please do that in\n// sqlite_database_scheme_updates.cpp\nconst char* SQLITE_DATABASE_SCHEME = R\"SOMERANDOMTEXT(\n\nCREATE TABLE IF NOT EXISTS `version` (\n `id` INTEGER PRIMARY KEY,\n `ver` TEXT NOT NULL\n);\n\n\nCREATE TABLE IF NOT EXISTS `env` (\n `id` INTEGER,\n `username`\tTEXT NOT NULL,\n `hostname`\tTEXT NOT NULL,\n PRIMARY KEY(`id`),\n CONSTRAINT unq UNIQUE (username, hostname)\n);\n\n\nCREATE TABLE IF NOT EXISTS `hashmeta` (\n `id` INTEGER ,\n `chunkSize` INTEGER NOT NULL,\n `maxCountOfReads` INTEGER NOT NULL,\n PRIMARY KEY(`id`),\nCONSTRAINT unq UNIQUE (`chunkSize`,`maxCountOfReads`)\n);\n\nCREATE TABLE IF NOT EXISTS `cmd` (\n `id` INTEGER,\n `sessionId` BLOB references session(id),\n `envId` INTEGER NOT NULL references env(id),\n `hashmetaId` INTEGER, /* NULL-able because hash may be disabled */\n `txt` TEXT NOT NULL,\n `returnVal` INTEGER NOT NULL,\n `startTime` timestamp NOT NULL,\n `endTime` timestamp NOT NULL,\n `workingDirectory` TEXT NOT NULL,\n PRIMARY KEY(`id`)\n);\n\n\nCREATE TABLE IF NOT EXISTS `file` (\n `id` INTEGER,\n `path` TEXT NOT NULL,\n `name` TEXT NOT NULL,\n `cmdId` INTEGER NOT NULL references cmd(id) ON DELETE CASCADE,\n `mtime` timestamp NOT NULL,\n `size` INTEGER NOT NULL,\n `hash` BLOB, /* 64 bit unsigned int, so use blob... 
*/\n PRIMARY KEY(`id`)\n);\n\nCREATE TABLE IF NOT EXISTS `exeMeta` (\n `id` INTEGER,\n `envId` INTEGER NOT NULL references `env`(id),\n `exepath` TEXT NOT NULL,\n PRIMARY KEY(`id`)\n);\n\nCREATE TABLE IF NOT EXISTS `exeFile` (\n `id` INTEGER,\n `exeMetaId` INTEGER NOT NULL references `exeMeta`(id),\n `name`  TEXT NOT NULL,\n `mtime` timestamp NOT NULL,\n `size` INTEGER NOT NULL,\n `isExecutable` bool NOT NULL,\n PRIMARY KEY(`id`)\n);\n\nCREATE TABLE IF NOT EXISTS `exeFileCmd` (\n `id` INTEGER,\n `cmdId` INTEGER NOT NULL,\n `exeFileId` INTEGER references exeFile(id),\n PRIMARY KEY(`id`)\n);\n\nCREATE TABLE IF NOT EXISTS `session` (\n `id`   BLOB,\n `comment` TEXT,\n PRIMARY KEY(`id`)\n);\n\nCREATE INDEX IF NOT EXISTS idx_file_name ON `file` (`name`);\nCREATE INDEX IF NOT EXISTS idx_file_mtime ON `file` (`mtime`);\nCREATE INDEX IF NOT EXISTS idx_file_size ON `file` (`size`);\nCREATE INDEX IF NOT EXISTS idx_file_hash ON `file` (`hash`);\n\nCREATE INDEX IF NOT EXISTS idx_exemeta_exepath ON `exeMeta` (`exepath`);\nCREATE INDEX IF NOT EXISTS idx_exeFile_name ON `exeFile` (`name`);\nCREATE INDEX IF NOT EXISTS idx_exeFile_mtime ON `exeFile` (`mtime`);\nCREATE INDEX IF NOT EXISTS idx_exeFile_size ON `exeFile` (`size`);\n\nreplace into version (id, ver) values (1, '0.1');)SOMERANDOMTEXT\";\n\n"
  },
  {
    "path": "src/common/database/sqlite_database_scheme.h",
    "content": "#pragma once\n\n\nextern const char* SQLITE_DATABASE_SCHEME;\n"
  },
  {
    "path": "src/common/database/sqlite_database_scheme_updates.cpp",
    "content": "#include \"sqlite_database_scheme_updates.h\"\n\n\nvoid sqlite_database_scheme_updates::v0_9(QSqlQueryThrow &query)\n{\n    // until this version no scripts (read files) were stored in the database\n    // so the tables can be dropped (and re-created) safely.\n    // Further rename tables to better represent read/write events\n    query.exec(\"ALTER TABLE `file` RENAME TO `writtenFile`\");\n\n    query.exec(\"drop index idx_exemeta_exepath\");\n    query.exec(\"drop index idx_exeFile_name\");\n    query.exec(\"drop index idx_exeFile_mtime\");\n    query.exec(\"drop index idx_exeFile_size\");\n\n    query.exec(\"drop table exeMeta\");\n    query.exec(\"drop table exeFile\");\n    query.exec(\"drop table exeFileCmd\");\n\n    query.exec(\n        \"CREATE TABLE IF NOT EXISTS `readFile` (\"\n          \"`id` INTEGER,\"\n          \"`envId` INTEGER NOT NULL references `env`(id),\"\n          \"`name` TEXT NOT NULL,\"\n          \"`path` TEXT NOT NULL,\"\n          \"`mtime` timestamp NOT NULL,\"\n          \"`size` INTEGER NOT NULL,\"\n          \"`mode` BLOB NOT NULL,\"\n          \"PRIMARY KEY(`id`)\"\n        \")\"\n    );\n\n    query.exec(\n        \"CREATE TABLE IF NOT EXISTS `readFileCmd` (\"\n          \"`id` INTEGER,\"\n          \"`cmdId` INTEGER NOT NULL references `cmd`(id) ON DELETE CASCADE,\"\n          \"`readFileId` INTEGER references readFile(id),\"\n          \"PRIMARY KEY(`id`)\"\n        \")\"\n    );\n}\n\n\nvoid sqlite_database_scheme_updates::v2_1(QSqlQueryThrow &query)\n{\n    // Add support for read files without belonging scripts.\n    // Also start hashing read files as well. 
Because the same read file\n    // can refer to multiple commands (many-to-many), it would be wrong to\n    // refererence the hashMetaId of a command -> add hashmetaId column.\n    query.exec(\"alter table `readFile` add column `hash` BLOB\");\n    query.exec(\"alter table `readFile` add column `hashmetaId` INTEGER\");\n    query.exec(\"alter table `readFile` add column `isStoredToDisk` INTEGER DEFAULT 1\");\n\n}\n\n\n\nvoid sqlite_database_scheme_updates::v2_2(QSqlQueryThrow &query)\n{\n    // Create indeces to improve query and delete performance.\n    query.exec(\"CREATE INDEX IF NOT EXISTS `idx_writtenFile_cmdId` ON `writtenFile` (`cmdId`)\");\n    query.exec(\"CREATE INDEX IF NOT EXISTS `idx_readFileCmd_cmdId` ON `readFileCmd` (`cmdId`)\");\n    query.exec(\"CREATE INDEX IF NOT EXISTS `idx_readFileCmd_readFileId` ON `readFileCmd` (`readFileId`)\");\n    query.exec(\"CREATE INDEX IF NOT EXISTS `idx_cmd_envId` ON `cmd` (`envId`)\");\n    query.exec(\"CREATE INDEX IF NOT EXISTS `idx_cmd_sessionId` ON `cmd` (`sessionId`)\");\n    query.exec(\"CREATE INDEX IF NOT EXISTS `idx_cmd_hashmetaId` ON `cmd` (`hashmetaId`)\");\n    query.exec(\"CREATE INDEX IF NOT EXISTS `idx_readFile_envId` ON `readFile` (`envId`)\");\n}\n\n\nvoid sqlite_database_scheme_updates::v2_4(QSqlQueryThrow& query){\n    // Create unified, deduplicated paths for read- and written file-events\n    // Ideally we would rename path to pathId and add a foreign key to it,\n    // but this is not possible with current (e.g. 
3.22.0) sqlite-versions.\n    // So we recreate the tables writtenFile and readFile\n\n    query.exec(R\"SOMERANDOMTEXT(\n        create table if not exists `pathtable` (\n           `id` INTEGER,\n           `path` TEXT NOT NULL,\n            PRIMARY KEY(`id`)\n        )\n    )SOMERANDOMTEXT\"\n    );\n    query.exec(\"create unique index if not exists \"\n               \" idx_unq_pathtable_path on pathtable (path)\");\n\n    query.exec(\"insert or ignore into pathtable (path) \"\n               \"select path from writtenFile\");\n\n    query.exec (\n    R\"SOMERANDOMTEXT(\n    CREATE TABLE `writtenFile_TMP` (\n        `id`\tINTEGER,\n        `name`\tTEXT NOT NULL,\n        `pathId` INTEGER NOT NULL, /* pathId instead of path */\n        `cmdId`\tINTEGER NOT NULL,\n        `mtime`\ttimestamp NOT NULL,\n        `size`\tINTEGER NOT NULL,\n        `hash`\tBLOB,\n        PRIMARY KEY(`id`),\n        FOREIGN KEY(`cmdId`) REFERENCES `cmd`(`id`) ON DELETE CASCADE,\n        FOREIGN KEY(`pathId`) REFERENCES `pathtable`(`id`) /* new */\n    ))SOMERANDOMTEXT\"\n    );\n\n    // copy all data, use pathId from newly created pathtable\n    query.exec(\"insert into writtenFile_TMP \"\n               \"(id,name,pathId,cmdId,mtime,size,hash) \"\n               \"select id,name,\"\n               \"(select id from pathtable where pathtable.path=writtenFile.path),\"\n               \"cmdId,mtime,size,hash \"\n               \"from writtenFile\");\n\n    query.exec(\"drop table if exists writtenFile\");\n\n    query.exec(\"ALTER TABLE `writtenFile_TMP` RENAME TO `writtenFile`\");\n\n    // (re-)create indices\n    // Foreign keys are always recommended to be indexed:\n    // (see https://www.sqlite.org/foreignkeys.html#fk_indexes )\n    query.exec(\"create index `idx_writtenFile_cmdId` ON `writtenFile` (`cmdId`)\");\n    query.exec(\"create index `idx_writtenFile_size` ON `writtenFile` (`size`)\");\n    query.exec(\"create index `idx_writtenFile_name` ON `writtenFile` (`name`)\");\n  
  query.exec(\"create index `idx_writtenFile_mtime` ON `writtenFile` (`mtime`)\");\n    query.exec(\"create index `idx_writtenFile_hash` ON `writtenFile` (`hash`)\");\n    // new one:\n    query.exec(\"create index `idx_writtenFile_pathId` ON `writtenFile` (`pathId`)\");\n\n\n    // -------- done with writtenFile\n\n    query.exec(\"insert or ignore into pathtable (path) \"\n               \"select path from readFile\");\n    query.exec (\n    R\"SOMERANDOMTEXT(\n    CREATE TABLE `readFile_TMP` (\n        `id`\tINTEGER,\n        `envId`\tINTEGER NOT NULL,\n        `name`\tTEXT NOT NULL,\n        `pathId` INTEGER NOT NULL, /* pathId instead of path */\n        `mtime`\ttimestamp NOT NULL,\n        `size`\tINTEGER NOT NULL,\n        `mode`\tBLOB NOT NULL,\n        `hash`\tBLOB,\n        `hashmetaId`\tINTEGER,\n        `isStoredToDisk`\tINTEGER DEFAULT 1,\n        FOREIGN KEY(`envId`) REFERENCES `env`(`id`),\n        FOREIGN KEY(`pathId`) REFERENCES `pathtable`(`id`), /* new */\n        PRIMARY KEY(`id`)\n    ))SOMERANDOMTEXT\"\n    );\n\n    // copy all data, use pathId from newly created pathtable\n    query.exec(\n    R\"SOMERANDOMTEXT(\n    insert into readFile_TMP\n    (id,envId,name,pathId,mtime,size,mode,hash,hashmetaId,isStoredToDisk)\n    select id,envId,name,\n    (select id from pathtable where pathtable.path=readFile.path),\n    mtime,size,mode,hash,hashmetaId,isStoredToDisk\n    from readFile\n    )SOMERANDOMTEXT\"\n    );\n\n    query.exec(\"drop table if exists readFile\");\n\n    query.exec(\"alter table `readFile_TMP` rename to `readFile`\");\n\n    query.exec(\"create index if not exists `idx_readFile_envId` ON `readFile` (`envId`)\");\n    // new one:\n    query.exec(\"create index `idx_readFile_pathId` ON `readFile` (`pathId`)\");\n}\n\n\nvoid sqlite_database_scheme_updates::v2_5(QSqlQueryThrow &query)\n{\n    // Forbid sense- and useless duplicates in readFileCmd.\n    // Before the kernel-module backend these occurred rarely,\n    // because 
fanotify already merges many equal events.\n    // The kernel-module backend does not do so (at least not in\n    // v2.4), so this became apparent quite late.\n\n    // Delete existing duplicates\n    query.exec(\n    R\"SOMERANDOMTEXT(\n    delete from readFileCmd\n    where `id` not in\n             (\n             select  min(`id`)\n             from    readFileCmd\n             group by cmdId, readFileId\n             )\n    )SOMERANDOMTEXT\"\n    );\n\n    query.exec(\"create unique index if not exists \"\n               \" idx_unq_readFileCmd on readFileCmd (cmdId,readFileId)\");\n\n    // Null-values in sql can be nasty, e.g. null != null.\n    // Replace null-hashes by empty strings.\n    query.exec(\"update writtenFile set `hash`='' where `hash` is null\");\n    query.exec(\"update readFile    set `hash`='' where `hash` is null\");\n\n    // Also delete and forbid duplicate written file-events.\n    query.exec(\n    R\"SOMERANDOMTEXT(\n    delete from writtenFile where `id` not in\n    (\n     select  min(`id`) from writtenFile\n     group by `name`,pathId,cmdId,mtime,size,hash\n    )\n    )SOMERANDOMTEXT\"\n    );\n\n    query.exec(\"create unique index if not exists \"\n               \" idx_unq_writtenFile on writtenFile (`name`,pathId,cmdId,mtime,size,hash)\");\n}\n"
  },
  {
    "path": "src/common/database/sqlite_database_scheme_updates.h",
    "content": "#pragma once\n\n#include \"qsqlquerythrow.h\"\n\nnamespace sqlite_database_scheme_updates {\n    void v0_9(QSqlQueryThrow& query); // 0.8 -> 0.9\n    void v2_1(QSqlQueryThrow& query); // 2.0 -> 2.1\n    void v2_2(QSqlQueryThrow& query); // 2.1 -> 2.2\n    void v2_4(QSqlQueryThrow& query); // 2.3 -> 2.4\n    void v2_5(QSqlQueryThrow& query); // 2.4 -> 2.5\n\n}\n\n"
  },
  {
    "path": "src/common/database/sqlquery.cpp",
    "content": "\n#include <QDebug>\n#include \"sqlquery.h\"\n#include \"exccommon.h\"\n#include \"util.h\"\n\n\nconst QString &SqlQuery::query() const\n{\n    return m_query;\n}\n\nQString &SqlQuery::query()\n{\n    return m_query;\n}\n\nconst QVariantList &SqlQuery::values() const\n{\n    return m_values;\n}\n\nvoid SqlQuery::clear()\n{\n    m_query.clear();\n    m_values.clear();\n    m_columnSet.clear();\n    m_tablenames.clear();\n    m_ascending = true;\n    m_limit = NO_LIMIT;\n}\n\nbool SqlQuery::isEmpty() const\n{\n    return m_query.isEmpty();\n}\n\n\n\nvoid SqlQuery::addWithAnd(const QString &columnName, const QVariant &value,\n                          const CompareOperator &operator_)\n{\n    addWithAnd(columnName, { value }, QVector<CompareOperator>{operator_});\n}\n\n/// @overload\nvoid SqlQuery::addWithAnd(const QString &columnName,\n                          const QVariantList &values, const CompareOperator &operator_,\n                          bool innerAND)\n{\n    addWithAnd(columnName, values, QVector<CompareOperator>{operator_}, innerAND);\n}\n\n\n\nvoid SqlQuery::addWithAnd(const QString &columnName, const QVariantList &values,\n                          const QVector<CompareOperator> &operators, bool innerAND)\n{\n    addWithConnector(columnName, values, operators, innerAND, true);\n}\n\nvoid SqlQuery::addWithAnd(const SqlQuery &sqlQ)\n{\n    addWithConnector(sqlQ, true);\n}\n\nvoid SqlQuery::addWithOr(const SqlQuery &sqlQ)\n{\n    addWithConnector(sqlQ, false);\n}\n\n/// Add the given values to the query-string and -values.\n/// If a QVariant.isNull insert \"is null\" instead of a placeholder.\n/// @param values: The number of values must match to the number of operators,\n///                except for the BETWEEN-operator (see below)\n/// @param operators: The comparsion operators used for each value. 
In case of\n///                   BETWEEN, only one operator may be passed.\n/// @param innerAnd: If true, connect the column-value pair with AND, else with OR\n///                  (in case of BETWEEN it is ignored)\n/// @param outerAnd: if true, connect the whole column-value pair with a single\n///                  AND, else use OR.\n///\n/// @throws QExcIllegalArgument\nvoid SqlQuery::addWithConnector(const QString& columnName, const QVariantList& values,\n                      const QVector<CompareOperator>& operators, bool innerAND,\n                                bool outerAnd){\n    if(values.isEmpty()){\n        throw QExcIllegalArgument(QString(\"%1: %2 must no be empty\")\n                                    .arg(__func__,\n                                         GET_VARIABLE_NAME(values)));\n    }\n\n    auto actualOps = expandOperatorsIfNeeded(operators, values.size());\n\n    writeConnectorPrefix(outerAnd);\n\n    auto valueIt = values.begin();\n    auto operatorIt = actualOps.begin();\n\n    QByteArray innerJunction;\n    if(innerAND ||\n            (operators.size() == 1 &&\n             operators.first().asEnum() == E_CompareOperator::BETWEEN)){\n        innerJunction = \" and \";\n    } else {\n        innerJunction = \" or \";\n    }\n    while(valueIt != values.end()){\n        if(valueIt != values.begin()){\n            m_query += innerJunction;\n        }\n        const QVariant & var = *valueIt;\n\n        if(var.isNull()){\n            // null values are only allowed for certain operators:\n            QString operatorNow;\n            switch (operatorIt->asEnum()) {\n            case E_CompareOperator::EQ:\n            case E_CompareOperator::LIKE:\n                operatorNow = \" is null \"; break;\n            case E_CompareOperator::NE:\n                operatorNow = \" is not null \"; break;\n            default:\n                throw QExcIllegalArgument(\"null is illegal for operator \" +\n                                          
operatorIt->asSql() + \" in column \" +\n                                          columnName\n                                          );\n            }\n             m_query += columnName + operatorNow ;\n        } else {\n            if(operatorIt->asEnum() == E_CompareOperator::BETWEEN){\n                throw QExcIllegalArgument(\"BETWEEN passed within list with len > 1\");\n            }\n\n            m_query += columnName + operatorIt->asSql() + \"? \";\n            m_values.push_back(var);\n        }\n\n        ++valueIt;\n        ++operatorIt;\n    }\n\n    writeConnectorSuffix();\n    addToTableCols(columnName);\n}\n\n\n/// Add the other query \"as is\" using the given connector\nvoid SqlQuery::addWithConnector(const SqlQuery& other, bool outerAnd){\n    if(other.isEmpty()){\n        return;\n    }\n\n    writeConnectorPrefix(outerAnd);\n\n    m_query += other.query();\n    m_values.append(other.values());\n    m_columnSet.insert(other.m_columnSet.begin(), other.m_columnSet.end());\n    m_tablenames.insert(other.m_tablenames.begin(), other.m_tablenames.end());\n\n    writeConnectorSuffix();\n}\n\n\n\n/// If the number of operators does not match the number of values, duplicate them, so they\n/// do (in that case len(operators) *must* be 1).\n/// The BETWEEN operator is a special case, it is transformed into >= and <=.\nQVector<CompareOperator>\nSqlQuery::expandOperatorsIfNeeded(const QVector<CompareOperator> &operators,\n                                                         int nValues) const\n{\n    if(operators.size() == nValues){\n        return operators;\n    }\n    if(operators.size() != 1){\n        throw QExcIllegalArgument(\n                    QString(\"len(operators) %1 !=len(values) %2 but not 1\")\n                    .arg(operators.size(), nValues));\n    }\n    const CompareOperator & op = operators.first();\n    if(op.asEnum() == E_CompareOperator::BETWEEN){\n        if(nValues != 2){\n            throw 
QExcIllegalArgument(QString(\"BETWEEN operator requires 2 values but %1\"\n                                              \"were given\").arg(nValues));\n        }\n        return { E_CompareOperator::GE, E_CompareOperator::LE };\n    } \n    // same operator for all values\n    auto newOps = QVector<CompareOperator>();\n    newOps.reserve(nValues);\n    for(int i=0; i < nValues; i++){\n        newOps.push_back(op);\n    }\n    return newOps;\n    \n\n}\n\n/// remeber that this table-column was used. If it contains a dot,\n/// the part before it is interpreted as tablename, after it as column.\nvoid SqlQuery::addToTableCols(const QString &tableCol)\n{\n    int dotIdx = tableCol.indexOf('.');\n    if(dotIdx == -1){\n        // assume column name without table name\n        m_columnSet.insert(tableCol);\n    } else {\n        m_tablenames.insert(tableCol.left(dotIdx));\n        m_columnSet.insert(tableCol.mid(dotIdx + 1));\n    }\n}\n\nvoid SqlQuery::writeConnectorPrefix(bool outerAnd)\n{\n    if(! m_query.isEmpty()){\n        m_query += (outerAnd) ? \" and \" : \" or \";\n    }\n    m_query += \" ( \";\n}\n\nvoid SqlQuery::writeConnectorSuffix()\n{\n     m_query += \" ) \";\n}\n\n/// setting the query is only allowed, it no values were set (yet)\nvoid SqlQuery::setQuery(const QString &query)\n{\n    if(! 
m_values.isEmpty()){\n        throw QExcProgramming(\"setting query while values not empty\");\n    }\n    m_query = query;\n}\n\n/// @return true, if the *exact* columnname was added via 'addWithAnd'\nbool SqlQuery::containsColumn(const QString &col) const\n{\n    return m_columnSet.find(col) != m_columnSet.end();\n}\n\nbool SqlQuery::containsTablename(const QString &table) const\n{\n    return m_tablenames.find(table) != m_tablenames.end();\n}\n\n\nint SqlQuery::limit() const\n{\n    return m_limit;\n}\n\n/// @param limit:  NO_LIMIT means *not* to impose a limit\nvoid SqlQuery::setLimit(int limit)\n{\n    m_limit = limit;\n}\n\n/// @return 'limit x '-string or space character, if NO_LIMIT is imposed\nQString SqlQuery::mkLimitString() const\n{\n    return (m_limit == NO_LIMIT) ? \" \" : \"limit \" + QString::number(m_limit) + \" \";\n}\n\nbool SqlQuery::ascending() const\n{\n    return m_ascending;\n}\n\nconst QString &SqlQuery::ascendingStr() const\n{\n    static const QString ASC_STR =  \"asc \";\n    static const QString DESC_STR = \"desc \";\n    if(m_ascending) return ASC_STR;\n    return DESC_STR;\n}\n\nvoid SqlQuery::setAscending(bool ascending)\n{\n    m_ascending = ascending;\n}\n\n/// Make an sql-query that always finds zero results (where 0)\nSqlQuery mkInertSqlQuery()\n{\n    SqlQuery q;\n    q.query() = \" 0 \";\n    return q;\n}\n"
  },
  {
    "path": "src/common/database/sqlquery.h",
    "content": "#pragma once\n\n#include <type_traits>\n#include <QVector>\n#include <QVariant>\n#include <unordered_set>\n\n#include \"compareoperator.h\"\n#include \"util.h\"\n\n\n/// Helper class to build the query part after a sql where-clause.\n/// Column-value(s)-pairs can be added to the SqlQuery in a NULL-safe\n/// manner. An AND-operator is only added, if necessary (query-columncount > 1).\nclass SqlQuery\n{\npublic:\n    static const int NO_LIMIT {-1};\n\n    void addWithAnd(const QString& columnName, const QVariant& value,\n                    const CompareOperator& operator_=CompareOperator());\n\n    void addWithAnd(const QString& columnName, const QVariantList& values,\n                    const CompareOperator& operator_=CompareOperator(), bool innerAND=false);\n\n    void addWithAnd(const QString& columnName, const QVariantList& values,\n                    const QVector<CompareOperator>& operators, bool innerAND=false);\n\n    void addWithAnd(const SqlQuery& sqlQ);\n    void addWithOr(const SqlQuery& sqlQ);\n\n    const QString& query() const;\n    QString& query();\n\n\n    const QVariantList& values() const;\n\n    void clear();\n\n    bool isEmpty() const;\n\n    bool ascending() const;\n    const QString& ascendingStr() const;\n\n    void setAscending(bool ascending);\n\n    int limit() const;\n    QString mkLimitString() const;\n    void setLimit(int limit);\n\n\n    void setQuery(const QString &query);\n\n    bool containsColumn(const QString& col) const;\n    bool containsTablename(const QString& table) const;\n\nprivate:\n\n    void addWithConnector(const QString& columnName, const QVariantList& values,\n                          const QVector<CompareOperator>& operators, bool innerAND=false,\n                          bool outerAnd=false);\n    void addWithConnector(const SqlQuery& sqlQ, bool outerAnd);\n\n\n    QVector<CompareOperator> expandOperatorsIfNeeded(\n            const QVector<CompareOperator> &operators, int nValues) const;\n   
 void addToTableCols(const QString& tableCol);\n    void writeConnectorPrefix(bool outerAnd);\n    void writeConnectorSuffix();\n\n    QString m_query;\n    QVariantList m_values;\n    std::unordered_set<QString> m_columnSet;\n    std::unordered_set<QString> m_tablenames;\n    bool m_ascending {true};\n    int m_limit {NO_LIMIT};\n\n};\n\n\nSqlQuery mkInertSqlQuery();\n\n"
  },
  {
    "path": "src/common/database/storedfiles.cpp",
    "content": "#include <cassert>\n\n#include \"storedfiles.h\"\n#include \"db_connection.h\"\n#include \"util.h\"\n#include \"qfilethrow.h\"\n#include \"os.h\"\n\nconst QString& StoredFiles::getReadFilesDir()\n{\n    static const QString path = db_connection::getDatabaseDir() + \"/readFiles\";\n    return path ;\n}\n\n/// creates path of stored files if not exist\n/// @return the created path\n/// @throws QExcIo\nconst QString& StoredFiles::mkpath()\n{\n    const auto & p = getReadFilesDir();\n    if( ! QDir(p).mkpath(p)){\n        throw QExcIo(qtr(\"Failed to the create directory for the stored read files at %1\")\n                             .arg(p));\n    }\n    return p;\n}\n\nStoredFiles::StoredFiles()\n{\n    const auto & path = getReadFilesDir();\n    m_readFilesDir.setPath(path);\n    this->mkpath();\n}\n\nQString StoredFiles::mkPathStringToStoredReadFile(const FileReadInfo &info)\n{\n    return mkPathStringToStoredReadFile(info.idInDb);\n}\n\nQString StoredFiles::mkPathStringToStoredReadFile(qint64 idInDb)\n{\n    return\n      pathJoinFilename(StoredFiles::getReadFilesDir(), QString::number(idInDb));\n}\n\nbool StoredFiles::deleteReadFile(const QString &fname)\n{\n    return m_readFilesDir.remove(fname);\n}\n\n/// @throws QExcIo\nvoid StoredFiles::addReadFile(const QString &fname, const QByteArray &data)\n{\n    const QString fPath = m_readFilesDir.absoluteFilePath(fname);\n    QFileThrow f(fPath);\n    try {\n        f.open(QFile::OpenModeFlag::WriteOnly | QFile::OpenModeFlag::Truncate);\n        f.write(data);\n    } catch (const QExcIo&) {\n        f.remove();\n        throw ;\n    }\n}\n\n\n/// @param info: the read file already loaded from the database\n/// @param dir: the directory where to restore it (warning: override without confirmation)\n/// @param openReadFileInDb: the for reading opened file corresponding to the info-database-entry.\nvoid StoredFiles::restoreReadFileAtDIr(const FileReadInfo &info, const QDir& dir,\n                         
                    const QFile &openReadFileInDb)\n{\n    assert(openReadFileInDb.isOpen());\n    const QString filePath = dir.absoluteFilePath(info.name);\n    QFileThrow dstFile(filePath);\n    dstFile.open(QFile::OpenModeFlag::WriteOnly);\n    os::sendfile(dstFile.handle(), openReadFileInDb.handle(), static_cast<size_t>(info.size));\n    os::fchmod(dstFile.handle(), info.mode);\n}\n\n\n/// @overload\nvoid StoredFiles::restoreReadFileAtDIr(const FileReadInfo &info, const QDir &dir)\n{\n    QFileThrow f(m_readFilesDir.absoluteFilePath(QString::number(info.idInDb)));\n    f.open(QFile::OpenModeFlag::ReadOnly);\n    restoreReadFileAtDIr(info, dir, f);\n}\n"
  },
  {
    "path": "src/common/database/storedfiles.h",
    "content": "#pragma once\n\n#include <QDir>\n\n#include \"fileinfos.h\"\n\nclass StoredFiles\n{\npublic:\n\n    static const QString &getReadFilesDir();\n\n    static const QString &mkpath();\n\n    StoredFiles();\n\n    QString mkPathStringToStoredReadFile(const FileReadInfo& info);\n    QString mkPathStringToStoredReadFile(qint64 idInDb);\n\n    bool deleteReadFile(const QString& fname);\n\n    void addReadFile(const QString& fname, const QByteArray& data);\n\n    void restoreReadFileAtDIr(const FileReadInfo &info, const QDir& dir,\n                                const QFile &openReadFileInDb);\n\n    void restoreReadFileAtDIr(const FileReadInfo &info, const QDir& dir);\n\nprivate:\n    QDir m_readFilesDir;\n\n};\n\n"
  },
  {
    "path": "src/common/fdcommunication.cpp",
    "content": "\n#include <cassert>\n#include <sys/socket.h>\n\n\n\n#include \"fdcommunication.h\"\n#include \"os.h\"\n#include \"cleanupresource.h\"\n#include \"util.h\"\n\n\nusing namespace fdcommunication;\n\n\nstruct MessageHeader {\n    int msgId;\n    size_t len; // length of custom payload\n    bool containsFd;\n};\n\nstatic_assert (std::is_pod<MessageHeader>(), \"\");\n\n\n\nSocketCommunication::SocketCommunication() : m_sockFd(-1)\n{}\n\n\n/// Block until we receive a message from the other endpoint of the set socket.\n/// Make sure, the internal receive buffer is large enough\n/// (* it most not be empty*).\nvoid SocketCommunication::receiveMessages(Messages *messages){\n    messages->clear();\n    if(m_receiveCtrlMsgBuf.size() < int(CMSG_SPACE(sizeof(int)))){\n        m_receiveCtrlMsgBuf.resize(CMSG_SPACE(sizeof(int)));\n    }\n\n    assert(! m_receiveBuf.isEmpty());\n\n    iovec ioVector = { m_receiveBuf.data(), static_cast<size_t>(m_receiveBuf.size()) };\n\n    struct msghdr msgHdr{};\n\n    msgHdr.msg_iov = &ioVector;\n    msgHdr.msg_iovlen = 1;\n    msgHdr.msg_control = m_receiveCtrlMsgBuf.data();\n    msgHdr.msg_controllen = size_t(m_receiveCtrlMsgBuf.size());\n\n    size_t len = os::recvmsg(m_sockFd, &msgHdr);\n    if (len == 0) {\n        messages->push_back(-1);\n        return;\n    }\n\n    if(len < sizeof (MessageHeader)){\n        throw ExcFdComm(qtr(\"Bad socket message received (too small)\"));\n    }\n\n    char* pData = m_receiveBuf.data();\n    const char* finalpData = pData + len;\n\n    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msgHdr);\n\n\n    int* currentFd = reinterpret_cast<int*>(CMSG_DATA(cmsg));\n    while(true){\n        auto *customHeader = reinterpret_cast<MessageHeader*>(pData);\n        // Consume the header, find out the payload-length...\n        pData += sizeof (MessageHeader);\n        assert(pData <= finalpData);\n        QByteArray payload(pData, static_cast<int>(customHeader->len));\n        Message 
message(customHeader->msgId, payload);\n\n        if (customHeader->containsFd) {\n            // we received an fd via SCM_RIGHTS.\n            // see also man 3 cmsg            \n            assert(cmsg->cmsg_level == SOL_SOCKET &&\n                   cmsg->cmsg_type  == SCM_RIGHTS);\n            message.fd = *currentFd;\n            ++currentFd;\n        }\n\n        messages->push_back(message);\n        // ... consume the payload\n        pData += customHeader->len;\n        assert(pData <= finalpData);\n        if(pData >= finalpData){\n            break;\n        }\n\n    }\n}\n\n\nvoid SocketCommunication::sendMsg(const SocketCommunication::Message &message)\n{\n    this->sendMessages({message});\n}\n\nvoid SocketCommunication::sendMessages(const SocketCommunication::Messages &messages)\n{\n    QVector<iovec> iovects;\n    iovects.reserve(messages.size() * 2);\n\n    QVector<MessageHeader> headers;\n    headers.reserve(messages.size());\n\n    QVector<int> fds;\n    for(const auto& msg : messages){\n        assert(msg.msgId >= 0);\n\n        // brace-initialize MessageHeader{}, otherwise valgrind complains (uninitialized)\n        headers.push_back(MessageHeader{});\n        headers.last().msgId = msg.msgId;\n        headers.last().len = size_t(msg.bytes.length());\n        headers.last().containsFd = (msg.fd != -1) ;\n\n        iovects.push_back({&headers.last(), sizeof (MessageHeader)});\n\n        iovec io;\n        io.iov_base = const_cast<void*>(static_cast<const void*>(msg.bytes.data()));\n        io.iov_len = size_t(msg.bytes.size());\n        iovects.push_back(io);\n\n        if(msg.fd != -1){\n            fds.push_back(msg.fd);\n        }\n    }\n    assert(iovects.capacity() == iovects.size());\n    assert(headers.capacity() == headers.size());\n\n\n    struct msghdr messageHeader{};\n    messageHeader.msg_iov = iovects.data();\n    messageHeader.msg_iovlen = iovects.size();\n\n\n    QByteArray buf( int(CMSG_SPACE(size_t(fds.size()) * sizeof(int)) ), 
'\\0');\n\n    if(! fds.isEmpty()){\n        messageHeader.msg_control = buf.data();\n        messageHeader.msg_controllen = size_t(buf.size());\n        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&messageHeader);\n        cmsg->cmsg_level = SOL_SOCKET;\n        cmsg->cmsg_type = SCM_RIGHTS;\n        cmsg->cmsg_len = CMSG_LEN(sizeof(int) * fds.size());\n        int *fdptr = reinterpret_cast<int*>(CMSG_DATA(cmsg) );\n        memcpy(fdptr, fds.data(), fds.size() * sizeof(int));\n    }\n    try {\n        os::sendmsg(m_sockFd, &messageHeader);\n    } catch (const os::ExcOs& ex) {\n        QString msg = (messages.isEmpty()) ? \"EMPTY message\" : messages[0].bytes;\n        throw ExcFdComm(qtr(\"Failed to send «%1» - %2\").arg(msg).arg(ex.what()));\n    }\n}\n\nint SocketCommunication::sockFd() const\n{\n    return m_sockFd;\n}\n\nvoid SocketCommunication::setSockFd(int fd)\n{\n    m_sockFd = fd;\n}\n\nvoid SocketCommunication::setReceiveBufferSize(int s)\n{\n    m_receiveBuf.resize(s);\n}\n\n/// set size of file descriptor buffer\nvoid SocketCommunication::setReceiveFdSize(int s)\n{\n    m_receiveCtrlMsgBuf.resize( s * int(CMSG_SPACE(sizeof(int))) );\n}\n\nSocketCommunication::Messages SocketCommunication::receiveMessages()\n{\n    Messages messages;\n    receiveMessages(&messages);\n    return messages;\n}\n\n\n\n\n\n"
  },
  {
    "path": "src/common/fdcommunication.h",
    "content": "#pragma once\n\n#include <QVector>\n\n#include \"exccommon.h\"\n\n\nnamespace fdcommunication {\n\n\nclass ExcFdComm : public QExcCommon\n{\npublic:\n    using QExcCommon::QExcCommon;\n};\n\n\nclass SocketCommunication\n{\npublic:\n    struct Message{\n        /// message-types < 0 are for internal use: -1 indicates an empty message\n        /// (end of data: all instances of the other endpoint were closed).\n        Message(int mesgType=-1,\n                const QByteArray& b=QByteArray(),\n                int fd = -1) : msgId(mesgType), bytes(b), fd(fd) {}\n\n        bool operator==(const Message &rhs) const {\n            return this->msgId == rhs.msgId &&\n                    this->bytes == rhs.bytes &&\n                    this->fd == rhs.fd;\n        }\n\n        int msgId;\n        QByteArray bytes;\n        int fd;\n    };\n    typedef QVector<Message> Messages;\n\n    SocketCommunication();\n\n    int sockFd() const;\n    void setSockFd(int sockFd);\n    void setReceiveBufferSize(int s);\n    void setReceiveFdSize(int s);\n\n    Messages receiveMessages();\n    void receiveMessages(Messages* messages);\n\n    void sendMsg(const Message& message);\n    void sendMessages(const Messages& messages);\n\nprivate:\n    QByteArray m_receiveBuf;\n    QByteArray m_receiveCtrlMsgBuf;\n    int m_sockFd;\n};\n\n\n} // namespace fdcommunication\n\n\n\n\n"
  },
  {
    "path": "src/common/fileeventhandler.cpp",
    "content": "\n#include <cassert>\n#include <string>\n#include <sys/fanotify.h>\n#include <cstring>\n#include <unistd.h>\n#include <fstream>\n#include <climits>\n#include <QtDebug>\n\n#include \"app.h\"\n#include \"excos.h\"\n#include \"fileeventhandler.h\"\n#include \"logger.h\"\n#include \"osutil.h\"\n#include \"os.h\"\n#include \"qfddummydevice.h\"\n#include \"qoutstream.h\"\n#include \"settings.h\"\n#include \"stdiocpp.h\"\n#include \"strlight_util.h\"\n\nstatic QString buildFilecacheDir(){\n    return\n       pathJoinFilename(QDir::tempPath(),\n          QString(app::SHOURNAL_RUN) + \"-cache-\" + QString::number(os::getpid()));\n}\n\n/// meant to be called as real user within the original mount namespace\nFileEventHandler::FileEventHandler() :\n    m_filecacheDir(buildFilecacheDir()),\n    m_uid(os::getuid()),\n    m_ourProcFdDirDescriptor(os::open(\"/proc/self/fd\", O_DIRECTORY)),\n    m_pathbuf(PATH_MAX + 1, '\\0'),\n    m_fdStringBuf(snprintf( nullptr, 0, \"%d\", std::numeric_limits<int>::max()) + 1, '\\0'),\n    r_wCfg(Settings::instance().writeFileSettings()),\n    r_rCfg(Settings::instance().readFileSettings()),\n    r_scriptCfg(Settings::instance().readEventScriptSettings()),\n    r_hashCfg(Settings::instance().hashSettings())\n{\n    FILE* f = stdiocpp::fopen(\n                pathJoinFilename(m_filecacheDir.path().toUtf8(),\n                                 QByteArray(\"file-events\")), \"w+\");\n    m_fileEvents.setFile(f);\n\n    this->fillAllowedGroups();\n    if(r_hashCfg.hashEnable){\n        // This is typically larger than maxCountOfReads*chunkSize\n        // but does not have to be.\n        m_hashControl.getXXHash().resizeBuf(1024*512);\n    }\n}\n\nFileEventHandler::~FileEventHandler(){\n    fclose(m_fileEvents.file());\n    try {\n        os::close(m_ourProcFdDirDescriptor);\n    } catch (const os::ExcOs& e) {\n        logCritical << __func__ << e.what();\n    }\n}\n\nvoid FileEventHandler::fillAllowedGroups()\n{\n    auto groups = 
os::getgroups();\n    auto egid = os::getegid();\n    auto gid = os::getgid();\n    for(const auto& g : groups){\n        if(gid == egid || g != egid){\n            // only insert the 'real' groups\n            m_groups.insert(g);\n        }\n    }\n}\n\n\n/// A file-write-event is considered allowed here, if the *real user*\n/// is allowed to write to a given file, be it because being the owner,\n/// the file is writable by everyone or he/she is\n/// part of the owning group.\n/// Background is that we are interested in write events, however,\n/// reporting file modifications of a root-process should not be allowed.\nbool FileEventHandler::userHasWritePermission(const struct stat &st)\n{\n    return (st.st_mode & S_IWUSR && st.st_uid == m_uid)  ||\n            st.st_mode & S_IWOTH ||\n           (st.st_mode & S_IWGRP && m_groups.find(st.st_gid) != m_groups.end());\n}\n\n\n/// See doc of userHasWritePermission, replace 'write' with 'read'\nbool FileEventHandler::userHasReadPermission(const struct stat &st)\n{\n    return (st.st_mode & S_IRUSR && st.st_uid == m_uid)  ||\n            st.st_mode & S_IROTH ||\n           (st.st_mode & S_IRGRP && m_groups.find(st.st_gid) != m_groups.end());\n\n}\n\n/// check whether to accept the file according to file extension and mime-type.\n/// If both set (not-empty) only one has to match,\n/// if both unset, accept all,\n/// else only take the set one into account.\nbool FileEventHandler::readFileTypeMatches(const Settings::ScriptFileSettings &scriptCfg,\n                                           int fd, const StrLight& fpath)\n{\n    if(! scriptCfg.includeExtensions.empty() && ! 
scriptCfg.includeMimetypes.empty()){\n        // both not empty, consider both (OR'd)\n        return fileExtensionMatches(scriptCfg.includeExtensions, fpath) ||\n               mimeTypeMatches(fd, scriptCfg.includeMimetypes);\n    }\n    if(scriptCfg.includeExtensions.empty() && scriptCfg.includeMimetypes.empty()){\n        return true;\n    }\n    // one is empty, the other not\n    if(! scriptCfg.includeExtensions.empty()){\n        return fileExtensionMatches(scriptCfg.includeExtensions, fpath);\n    }\n    assert(! scriptCfg.includeMimetypes.empty());\n    return mimeTypeMatches(fd, scriptCfg.includeMimetypes);\n}\n\nvoid FileEventHandler::readLinkOfFd(int fd, StrLight &output)\n{\n    assert(m_ourProcFdDirDescriptor != -1);\n    // uitoa, safe in this context (but not in general),\n    // is a lot faster, so do not use snprintf here.\n    // snprintf( &m_fdStringBuf[0], m_fdStringBuf.size() (+1?), \"%d\", fd);\n    util_performance::uitoa(fd, m_fdStringBuf.data());\n    ssize_t path_len = ::readlinkat(m_ourProcFdDirDescriptor,\n                                    m_fdStringBuf.data(),\n                                    output.data(), output.capacity());\n    if (path_len == -1 ){\n        throw os::ExcReadLink(\"readlinkat failed for fd \" + std::to_string(fd));\n    }\n    output.resize(StrLight::size_type(path_len));\n}\n\nbool FileEventHandler::fileExtensionMatches(const Settings::StrLightSet &validExtensions,\n                                            const StrLight& fullPath)\n{\n    strlight_util::findFileExtension_raw(fullPath, m_extensionBuf);\n    if(m_extensionBuf.empty()){\n        return false;\n    }\n    return validExtensions.find(m_extensionBuf) != validExtensions.end();\n}\n\nbool FileEventHandler::mimeTypeMatches(int fd, const Settings::MimeSet &validMimetypes)\n{\n    QFdDummyDevice f(fd);\n    const auto mimetype = m_mimedb.mimeTypeForData(&f).name();\n    os::lseek(fd, 0, SEEK_SET);\n    return validMimetypes.find(mimetype) != 
validMimetypes.end();\n}\n\n\nQString FileEventHandler::getTmpDirPath() const\n{\n    return m_filecacheDir.path();\n}\n\n\nvoid FileEventHandler::clearEvents()\n{\n    m_fileEvents.clear();\n}\n\nFileEvents &FileEventHandler::fileEvents()\n{\n    return m_fileEvents;\n}\n\n/// @param enableReadActions: if false, do not read from fd, regardless of settings\n/// @throws ExcOs, CXXHashError\nvoid FileEventHandler::handleCloseWrite(int fd)\n{\n    // first lookup the path, then stat, so no filename contains a trailing '(deleted)'\n    readLinkOfFd(fd, m_pathbuf);\n    const auto st = os::fstat(fd);\n    if(st.st_nlink == 0){\n        // always ignore deleted files\n        logDebug << \"closedwrite-event ignored (file deleted):\"\n                 << m_pathbuf;\n        return;\n    }\n\n    if(! userHasWritePermission(st)){\n        logDebug << \"closedwrite-event ignored (no write permission):\"\n                 << m_pathbuf;\n        return;\n    }\n\n    if(! r_wCfg.includePaths->isSubPath(m_pathbuf, true) ){\n        logDebug << \"closedwrite-event ignored (no subpath of include_dirs): \"\n                 << m_pathbuf;\n        return;\n    }\n    if(r_wCfg.excludePaths->isSubPath(m_pathbuf, true) ){\n        logDebug << \"closedwrite-event ignored (subpath of exclude_dirs): \"\n                 << m_pathbuf;\n        return;\n    }\n\n    if(r_wCfg.excludeHidden && pathIsHidden(m_pathbuf) &&\n            ! 
r_wCfg.includePathsHidden->isSubPath(m_pathbuf, true)){\n        logDebug << \"closedwrite-event ignored (hidden file):\"\n                 << m_pathbuf;\n        return;\n    }\n\n    if(m_fileEvents.wEventCount() >= r_wCfg.maxEventCount){\n        logDebug << \"closedwrite-event dropped:\"\n                 << m_pathbuf;\n        m_fileEvents.incrementDropCount(O_WRONLY);\n        return;\n    }\n\n    HashValue hash;\n    if(r_hashCfg.hashEnable){\n        hash =  m_hashControl.genPartlyHash(fd, st.st_size,\n                                                       r_hashCfg.hashMeta);\n    }\n    m_fileEvents.write(O_WRONLY, m_pathbuf, st, hash);\n\n    logDebug << \"closedwrite-event recorded: \"\n             << m_pathbuf;\n\n    // maybe_todo: reimplement that, if desired (?).\n    // if(m_pArgparse->getCommandline()){\n    //     info.cmdline = pidcontrol::findCmdlineOfPID(pid); // PID from fanotify event data...\n    // }\n}\n\nbool FileEventHandler::generalReadSettingsSayLogIt(const bool userHasWritePerm,\n                                                   const StrLight& filepath)\n{\n    if(! r_rCfg.enable){\n        return false;\n    }\n    if(r_rCfg.onlyWritable && ! userHasWritePerm){\n        logDebug << \"general read event ignored: no write permission:\"\n                 << filepath;\n        return false;\n    }\n\n    if( ! r_rCfg.includePaths->isSubPath(filepath, true)){\n        logDebug << \"general read event ignored: not a subpath of any included path:\"\n                 << filepath;\n        return false;\n    }\n    if( r_rCfg.excludePaths->isSubPath(filepath, true)){\n        logDebug << \"general read event ignored: is a subpath of an excluded path:\"\n                 << filepath;\n        return false;\n    }\n\n    if(r_rCfg.excludeHidden && pathIsHidden(filepath) &&\n            ! 
r_rCfg.includePathsHidden->isSubPath(filepath, true)){\n        logDebug << \"general read event ignored: hidden file:\"\n                 << filepath;\n        return false;\n    }\n\n    return true;\n}\n\nbool\nFileEventHandler::scriptReadSettingsSayLogIt(bool userHasWritePerm,\n                                                  const StrLight &fpath,\n                                                  const os::stat_t &st,\n                                                  int fd)\n{\n    if(! r_scriptCfg.enable){\n        return false;\n    }\n    // repeat check here: fanotify-read-events are only unregistered, if\n    // general read events are disabled...\n    if(m_fileEvents.rStoredFilesCount() >= r_scriptCfg.maxCountOfFiles){\n        logDebug << \"possible script-event ignored: already collected enough files:\"\n                 << fpath;\n        return false;\n    }\n\n    if(r_scriptCfg.onlyWritable && ! userHasWritePerm){\n        logDebug << \"possible script-event ignored: no write permission:\"\n                 << fpath;\n        return false;\n    }\n\n    if(st.st_size > r_scriptCfg.maxFileSize){\n        logDebug << \"possible script-event ignored: file too big:\"\n                 << fpath;\n        return false;\n    }\n\n    if( ! r_scriptCfg.includePaths->isSubPath(fpath, true)){\n        logDebug << \"possible script-event ignored: file\"\n                 << fpath << \"is not a subpath of any included path\";\n        return false;\n    }\n    if( r_scriptCfg.excludePaths->isSubPath(fpath, true)){\n        logDebug << \"possible script-event ignored: file\"\n                 << fpath << \"is a subpath of an excluded path\";\n        return false;\n    }\n\n    if(r_scriptCfg.excludeHidden && pathIsHidden(fpath) &&\n            ! r_scriptCfg.includePathsHidden->isSubPath(fpath, true)){\n        logDebug << \"possible script-event ignored: hidden file:\"\n                 << fpath;\n        return false;\n    }\n\n    if(! 
readFileTypeMatches(r_scriptCfg, fd, fpath)){\n        logDebug << \"script-event ignored: neither file-extension nor mime-type \"\n                    \"matches for \" << fpath;\n        return false;\n    }\n    return true;\n}\n\nbool FileEventHandler::pathIsHidden(const StrLight &fullPath)\n{\n    return fullPath.find(\"/.\") != StrLight::npos;\n}\n\n\n/// @param enableReadActions: if false, do not read from fd, regardless of settings\nvoid FileEventHandler::handleCloseRead(int fd)\n{\n    // first lookup the path, then stat, so no filename contains a trailing '(deleted)'\n    readLinkOfFd(fd, m_pathbuf);\n    const auto st = os::fstat(fd);\n    if(st.st_nlink == 0){\n        // always ignore deleted files\n        logDebug << \"read-event ignored (file deleted): \"\n                 << m_pathbuf;\n        return;\n    }\n\n    if(! userHasReadPermission(st)){\n        logDebug << \"read-event ignored (read not allowed): \"\n                 << m_pathbuf;\n        return;\n    }\n    const bool userHasWritePerm = userHasWritePermission(st);\n    const bool logGeneralReadEvent = generalReadSettingsSayLogIt(userHasWritePerm,\n                                                                 m_pathbuf);\n    bool logScriptEvent = scriptReadSettingsSayLogIt(userHasWritePerm, m_pathbuf,\n                                                     st, fd);\n    if(! logGeneralReadEvent && ! 
logScriptEvent){\n        return;\n    }\n    if(m_fileEvents.rEventCount() >= r_rCfg.maxEventCount){\n        logDebug << \"closedread-event dropped:\"\n                 << m_pathbuf;\n        m_fileEvents.incrementDropCount(O_RDONLY);\n        return;\n    }\n\n    HashValue hash;\n    if(r_hashCfg.hashEnable){\n        assert(os::ltell(fd) == 0);\n        hash =  m_hashControl.genPartlyHash(fd, st.st_size,\n                                                       r_hashCfg.hashMeta);\n    }\n    int storeFd;\n    if(logScriptEvent){\n        assert(os::ltell(fd) == 0);\n        storeFd = fd;\n    } else {\n        storeFd = -1;\n    }\n\n    m_fileEvents.write(O_RDONLY, m_pathbuf, st, hash, storeFd);\n\n    logDebug << \"closedread-event recorded (collect script:\" << logScriptEvent << \")\"\n             << m_pathbuf;\n}\n\n\n\n\n\n\n\n\n"
  },
  {
    "path": "src/common/fileeventhandler.h",
    "content": "#pragma once\n\n#include <sys/stat.h>\n#include <string>\n#include <unordered_set>\n#include <QHash>\n#include <QPair>\n#include <QMimeDatabase>\n#include <QTemporaryDir>\n\n#include \"hashcontrol.h\"\n#include \"nullable_value.h\"\n#include \"fileevents.h\"\n#include \"settings.h\"\n#include \"os.h\"\n#include \"strlight.h\"\n#include \"util_performance.h\"\n\n\n/// Collect desired file-event (read/write) metadata based on a file-descriptor.\n/// The Metadata is stored within binary files at a temporary directory\n/// (some read files may be stored there as a whole, based on user configuration).\n/// Events are filtered beforehand, e.g. for matching user or include/exclude paths.\nclass FileEventHandler\n{\npublic:\n    FileEventHandler();\n    ~FileEventHandler();\n\n    void handleCloseWrite(int fd);\n    void handleCloseRead(int fd);\n\n    FileEvents& fileEvents();\n\n    void clearEvents();\n\n    QString getTmpDirPath() const;\n\npublic:\n    Q_DISABLE_COPY(FileEventHandler)\n    DISABLE_MOVE(FileEventHandler)\n\n\nprivate:\n    void fillAllowedGroups();\n\n    bool userHasWritePermission(const struct stat& st);\n    bool userHasReadPermission(const struct stat& st);\n    bool readFileTypeMatches(const Settings::ScriptFileSettings& scriptCfg, int fd,\n                             const StrLight &fpath);\n    void readLinkOfFd(int fd, StrLight &output);\n\n    bool fileExtensionMatches(const Settings::StrLightSet &validExtensions,\n                              const StrLight &fullPath);\n    bool mimeTypeMatches(int fd, const Settings::MimeSet& validMimetypes);\n    bool generalReadSettingsSayLogIt(bool userHasWritePerm,\n                                     const StrLight &filepath);\n    bool scriptReadSettingsSayLogIt(bool userHasWritePerm,\n                                    const StrLight &fpath,\n                                    const os::stat_t& st,\n                                    int fd);\n    bool pathIsHidden(const StrLight 
&fullPath);\n\n    QTemporaryDir m_filecacheDir;\n    FileEvents m_fileEvents;\n    HashControl m_hashControl;\n    std::unordered_set<gid_t> m_groups;\n    uid_t m_uid; // cached real uid\n    int m_ourProcFdDirDescriptor; // holds open fd on /proc/self/fd\n    QMimeDatabase m_mimedb;\n    StrLight m_pathbuf;\n    StrLight m_fdStringBuf;\n    StrLight m_extensionBuf;\n\n    const Settings::WriteFileSettings& r_wCfg;\n    const Settings::ReadFileSettings& r_rCfg;\n    const Settings::ScriptFileSettings& r_scriptCfg;\n    const Settings::HashSettings& r_hashCfg;\n};\n\n"
  },
  {
    "path": "src/common/fileevents.cpp",
    "content": "#include \"fileevents.h\"\n\n#include <sys/stat.h>\n#include <cassert>\n\n#include \"stdiocpp.h\"\n#include \"strlight.h\"\n#include \"os.h\"\n#include \"osutil.h\"\n#include \"logger.h\"\n#include \"user_kernerl.h\"\n\n\n\n\nint FileEvent::flags() const\n{\n    return m_close_event.flags;\n}\n\nuint64_t FileEvent::mtime() const\n{\n    return m_close_event.mtime;\n}\n\nsize_t FileEvent::size() const\n{\n    return m_close_event.size;\n}\n\nuint64_t FileEvent::mode() const\n{\n    return m_close_event.mode;\n}\n\nHashValue FileEvent::hash() const\n{\n    return (m_close_event.hash_is_null)\n            ? HashValue()\n            : HashValue(m_close_event.hash);\n}\n\noff_t FileEvent::fileContentSize() const\n{\n    return m_close_event.bytes;\n}\n\noff_t FileEvent::fileContentStart() const\n{\n    return m_fileContentStart;\n}\n\nconst char *FileEvent::path() const\n{\n    return m_path.data();\n}\n\nFILE *FileEvent::file() const\n{\n    return m_file;\n}\n\nvoid FileEvent::setPath(const char *path)\n{\n    m_path = path;\n}\n\n\n/////////////////////////////////////////////////////////////////////////////////////\n\n\n\n/// Read null-terminated c-string from file into buf, return len\nstatic int freadCstring(FILE* file, char* buf){\n    int c;\n    int pathIdx = 0;\n    while((c=stdiocpp::fgetc_unlocked(file)) != EOF){\n        buf[pathIdx++] = char(c);\n        if(c == '\\0'){\n            break;\n        }\n    }\n    if(pathIdx == 0){\n        throw QExcIo(QString(\"EOF reached without expected null-terminator for file %1\")\n                     .arg(osutil::findPathOfFd<QByteArray>(fileno(file)).constData()));\n    }\n    return pathIdx;\n}\n\nbool FileEvents::isReadEvent(int flags)\n{\n    switch (flags) {\n    case O_RDONLY:\n    case O_RDWR: return true;\n    default: return false;\n    }\n}\n\nbool FileEvents::isWriteEvent(int flags)\n{\n    switch (flags) {\n    case O_WRONLY:\n    case O_RDWR: return true;\n    default: return false;\n   
 }\n}\n\nFileEvents::FileEvents() :\n    m_wbuf_lastReadDir(PATH_MAX, '\\0'),\n    m_wbuf_lastWrittenDir(PATH_MAX, '\\0')\n{\n\n}\n\n/// Write the file-event to the logfile. If storefd != -1 and\n/// the file has a st_size greater than zero, the whole file is copied\n/// as well.\nvoid FileEvents::write(int flags, const StrLight &path,\n                       const struct stat &st, HashValue hash, int storefd)\n{\n    bool isREvent =  isReadEvent(flags);\n\n    m_eventTmp.flags = flags;\n    m_eventTmp.mtime = st.st_mtime;\n    m_eventTmp.size = st.st_size;\n    m_eventTmp.mode = st.st_mode;\n\n    m_eventTmp.hash = (hash.isNull()) ? 0  : hash.value();\n    m_eventTmp.hash_is_null = hash.isNull();\n\n    m_eventTmp.bytes = (storefd == -1) ? 0 : st.st_size;\n    auto oldOffset = stdiocpp::ftell(m_file);\n    stdiocpp::fwrite_unlocked(&m_eventTmp , sizeof(m_eventTmp), 1, m_file );\n\n    if(m_eventTmp.bytes > 0){\n        assert(os::ltell(storefd) == 0);\n        int targetfd = fileno_unlocked(m_file);\n        // kernel-copy has no idea of our buffer - flush it\n        stdiocpp::fflush(m_file);\n        auto sent = os::sendfile(targetfd, storefd, m_eventTmp.bytes);\n        stdiocpp::fseek(m_file, os::ltell(targetfd), SEEK_SET);\n\n        if(sent != off_t(m_eventTmp.bytes)){\n            // should happen very rarely - seek back and correct file size\n            logInfo << qtr(\"Could only collect %1 of %2 bytes for file %3\")\n                       .arg(sent).arg(m_eventTmp.bytes)\n                       .arg(osutil::findPathOfFd<QByteArray>(storefd).constData());\n\n            m_eventTmp.bytes = sent;\n            stdiocpp::fseek(m_file, oldOffset, SEEK_SET);\n            stdiocpp::fwrite_unlocked(&m_eventTmp , sizeof(m_eventTmp), 1, m_file );\n            stdiocpp::fseek(m_file, 0, SEEK_END);\n        }\n        if(isREvent){\n            m_rStoredFilesCount++;\n        }\n        if(isWriteEvent(flags)){\n            m_wStoredFilesCount++;\n        }\n    
}\n\n     writeFilenameToFile(path, isREvent);\n\n     if(isREvent){\n         m_rEventCount++;\n     } else {\n         m_wEventCount++;\n     }\n}\n\nvoid FileEvents::incrementDropCount(int eventType)\n{\n    switch (eventType) {\n    case O_RDONLY: m_rDroppedCount++; break;\n    case O_WRONLY: m_wDroppedCount++; break;\n    default: throw QExcProgramming(\"bad event type: \" +\n                                   QString::number(eventType));\n    }\n}\n\nvoid FileEvents::clear()\n{\n    stdiocpp::ftruncate_unlocked(m_file);\n    m_rStoredFilesCount = 0;\n    m_wStoredFilesCount = 0;\n    m_rEventCount = 0;\n    m_wEventCount = 0;\n}\n\n\n\nFileEvent *FileEvents::read()\n{\n    if(stdiocpp::fread_unlocked(&m_fileEvent.m_close_event,\n                                sizeof(shournalk_close_event), 1, m_file) != 1){\n        return nullptr;\n    }\n    if(m_fileEvent.fileContentSize() > 0){\n        // remember offset where file content begins, the caller may use this\n        m_fileEvent.m_fileContentStart = stdiocpp::ftell(m_file);\n        stdiocpp::fseek(m_file, m_fileEvent.fileContentSize(), SEEK_CUR);\n    }\n    auto filename_len = freadCstring(m_file, m_pathTmp);\n\n    // If last and current directory-path is equal, the producer\n    // may have omitted it after the first time,\n    // for read- and write-events respectively. In this case,\n    // the path does not start with a '/'\n    auto& lastDir = (isWriteEvent(m_fileEvent.m_close_event.flags))\n            ? 
m_rbuf_lastWrittenDir : m_rbuf_lastReadDir;\n    if(m_pathTmp[0] == '/'){\n        m_fileEvent.m_path = QByteArray(m_pathTmp, filename_len);\n        lastDir = splitAbsPath<QByteArray>(m_fileEvent.m_path).first;\n    } else {\n        // use dir-path from last time and append current filename\n        m_fileEvent.m_path = pathJoinFilename(\n                                lastDir,\n                                QByteArray::fromRawData(m_pathTmp, filename_len));\n    }\n    return &m_fileEvent;\n}\n\nFILE *FileEvents::file() const\n{\n    return m_file;\n}\n\nvoid FileEvents::setFile(FILE *file)\n{\n    m_fileEvent.m_file = file;\n    m_file = file;\n}\n\nuint FileEvents::wEventCount() const\n{\n    return m_wEventCount;\n}\n\nuint FileEvents::wDroppedCount() const\n{\n    return m_wDroppedCount;\n}\n\n\nuint FileEvents::rEventCount() const\n{\n    return m_rEventCount;\n}\n\nuint FileEvents::rDroppedCount() const\n{\n    return m_rDroppedCount;\n}\n\n\nuint FileEvents::rStoredFilesCount() const\n{\n    return m_rStoredFilesCount;\n}\n\nuint FileEvents::wStoredFilesCount() const\n{\n    return m_wStoredFilesCount;\n}\n\n\nvoid FileEvents::writeFilenameToFile(const StrLight &path, bool isREvent)\n{\n    auto & lastDir = (isREvent) ? 
m_wbuf_lastReadDir : m_wbuf_lastWrittenDir;\n    const char* filename;\n    size_t len_with_nul;\n    int slashIdx = path.lastIndexOf('/');\n    if(unlikely(slashIdx < 0)){\n        throw QExcProgramming(QString(\"Invalid path %1\").arg(path.c_str()));\n    }\n\n    if(unlikely(slashIdx == 0)){\n        // a file written to the root directory.\n        // KISS and always write full path.\n        // size + 1 -> include nul.\n        stdiocpp::fwrite_unlocked(path.c_str(), path.size()+ 1, 1, m_file );\n        lastDir.resize(0); // invalidate last cached dir\n        return;\n    }\n\n    if(slashIdx == int(lastDir.size())  &&\n       memcmp(lastDir.constData(), path.constData(), slashIdx) == 0){\n        // optimization: don't write full path but only filename (this\n        // is later handled when reading back for read- and write-events\n        // respectively).\n        filename = path.c_str() + slashIdx + 1;\n        len_with_nul = path.size() - slashIdx; // including nul\n    } else {\n        // write full path and remember current directory for next time\n        filename = path.c_str();\n        len_with_nul = path.size() + 1; // including nul\n        lastDir.resize(slashIdx);\n        memcpy(lastDir.data(), path.c_str(), slashIdx);\n    }\n    stdiocpp::fwrite_unlocked(filename, len_with_nul, 1, m_file );\n}\n\n\n\n\n\n\n"
  },
  {
    "path": "src/common/fileevents.h",
    "content": "#pragma once\n\n#include <linux/limits.h>\n\n#include \"shournalk_user.h\"\n#include \"nullable_value.h\"\n#include \"strlight.h\"\n\n\nclass FileEvent {\npublic:\n\n    int flags() const; /* One of O_RDONLY, O_WRONLY, O_RDWR */\n    uint64_t mtime() const;\n    size_t size() const;\n    uint64_t mode() const;\n    HashValue hash() const;\n    off_t fileContentSize() const;\n    off_t fileContentStart() const;\n    const char* path() const;\n\n    FILE *file() const;\n\nprivate:\n    void setPath(const char* path);\n\n    shournalk_close_event m_close_event;\n    QByteArray m_path;\n    off_t m_fileContentStart;\n    FILE* m_file;\n\n    friend class FileEvents;\n    friend class DbCtrlTest;\n};\n\n\n/// Write file-events (in binary format) to a log-file and\n/// read them later on.\nclass FileEvents\n{\npublic:\n    static bool isReadEvent(int flags);\n    static bool isWriteEvent(int flags);\n\npublic:\n    FileEvents();\n\n    void write(int flags, const StrLight &path,\n               const struct stat &st, HashValue hash, int storefd=-1);\n    void incrementDropCount(int eventType);\n\n    void clear();\n\n    FileEvent* read();\n\n    FILE *file() const;\n    void setFile(FILE *file);\n\n    uint rEventCount() const;\n    uint rDroppedCount() const;\n    uint rStoredFilesCount() const;\n\n    uint wEventCount() const;\n    uint wDroppedCount() const;\n    uint wStoredFilesCount() const;\n\n\n\nprivate:\n    Q_DISABLE_COPY(FileEvents)\n\n    void writeFilenameToFile(const StrLight& path, bool isREvent);\n\n    FILE* m_file{};\n    FileEvent m_fileEvent{};\n    shournalk_close_event m_eventTmp{};\n\n    StrLight m_wbuf_lastReadDir;\n    StrLight m_wbuf_lastWrittenDir;\n\n    QByteArray m_rbuf_lastReadDir;\n    QByteArray m_rbuf_lastWrittenDir;\n    char m_pathTmp[PATH_MAX];\n    uint m_rEventCount{0};\n    uint m_rDroppedCount{0};\n    uint m_rStoredFilesCount{0};\n\n    uint m_wEventCount{0};\n    uint m_wDroppedCount{0};\n    uint 
m_wStoredFilesCount{0};\n};\n\n\n\n"
  },
  {
    "path": "src/common/generic_container.h",
    "content": "#pragma once\n\n#include <type_traits>\n\n\ntemplate <class ContainerT>\nclass TypeHas_push_back\n{\n    template <class T>\n    // signature has to match *exactly* (indclduing e.g. a 'const' at the end for a\n    // const object method\n    static std::true_type testSignature(void (T::*)(const typename ContainerT::value_type&));\n\n    template <class T>\n    static decltype(testSignature(&T::push_back)) test(std::nullptr_t);\n\n    template <class T>\n    static std::false_type test(...);\n\npublic:\n    using type = decltype(test<ContainerT>(nullptr));\n    static const bool value = type::value;\n};\n\n\n/// Allow adding to containers which either implement push_back or\n/// insert. The container must provide Container::value_type.\ntemplate <class ContainerT,\n          typename std::enable_if<TypeHas_push_back<ContainerT>::value,\n          std::nullptr_t>::type = nullptr>\nvoid addToContainer(ContainerT& t, const typename ContainerT::value_type& val ){\n    t.push_back(val);\n}\n\ntemplate <class ContainerT,\n          typename std::enable_if<!TypeHas_push_back<ContainerT>::value,\n          std::nullptr_t>::type = nullptr>\nvoid addToContainer(ContainerT& t, const typename ContainerT::value_type& val ){\n    t.insert(val);\n}\n\n\n\n"
  },
  {
    "path": "src/common/groupcontrol.cpp",
    "content": "\n#include <algorithm>\n#include <QDebug>\n\n#include \"groupcontrol.h\"\n#include \"os.h\"\n\n\n/// @return all 'real' groups, the calling user is a member\n/// of\nos::Groups\n    groupcontrol::generateRealGroups(){\n    // according to man getgroups(2) \"it is unspecified whether the\n    // effective group ID of the calling process is included in the returned\n    // list\".\n    os::Groups groups = os::getgroups();\n    gid_t egid = os::getegid();\n    if(egid != os::getgid()){\n        auto egidIter = std::find(groups.begin(), groups.end(), egid);\n        if(egidIter != groups.end()){\n            groups.erase(egidIter);\n        }\n    }\n    return groups;\n}\n\n/// create a one-to-one-mapping of param groups for the /proc/$pid/gid_map.\n/// Be a little smart and merge consecutive groups into one IdMapEntry.\ngroupcontrol::GidMapRanges_T groupcontrol::generateGidMapRanges(const os::Groups &groups_){\n    if(groups_.empty()){\n        qDebug() << \"generateGidMapRanges called with empty groups\";\n        return std::vector<S_IdMapEntry<gid_t>>();\n    }\n\n    auto groups = groups_;\n    std::sort(groups.begin(), groups.end());\n\n    std::vector<S_IdMapEntry<gid_t>> mapRanges;\n    mapRanges.reserve(groups.size());\n    mapRanges.emplace_back(groups.at(0));  // at least one group exists (real gid)\n\n    for(size_t i=1; i < groups.size(); i++){\n        S_IdMapEntry<gid_t> & previousRange = mapRanges.back();\n        if(groups[i] == previousRange.idInNs + 1){\n            // gid-range possible\n            previousRange.count++;\n        } else {\n            mapRanges.emplace_back(groups[i]);\n        }\n    }\n\n    return mapRanges;\n}\n\n\n\n/////////////////////// PRIVATE /////////////////////////\n\n\n\n"
  },
  {
    "path": "src/common/groupcontrol.h",
    "content": "#pragma once\n\n#include <vector>\n\n#include \"idmapentry.h\"\n#include \"os.h\"\n\nnamespace groupcontrol {\ntypedef std::vector<S_IdMapEntry<gid_t> > GidMapRanges_T;\n\nos::Groups generateRealGroups();\n\nGidMapRanges_T\n    generateGidMapRanges(const os::Groups & groups);\n\n\n}\n"
  },
  {
    "path": "src/common/hashcontrol.cpp",
    "content": "\n#include \"hashcontrol.h\"\n\n/// xxhash parts of a file (or the whole file in case of a small one) according to the\n/// specified hashmeta-parameters.\n/// @return hash-value of null, if 0 bytes were read.\n/// @throws ExcOs, CXXHashError\nHashValue HashControl::genPartlyHash(int fd, qint64 filesize, const HashMeta &hashMeta,\n                                     bool resetOffset)\n{\n    const off64_t seektstep = filesize / hashMeta.maxCountOfReads;\n    auto hashRes = m_hash.digestFile(\n                        fd,\n                        hashMeta.chunkSize,\n                        seektstep ,\n                        hashMeta.maxCountOfReads);\n    HashValue hashVal;\n    if(hashRes.count_of_bytes > 0){\n        if(resetOffset){\n            os::lseek(fd, 0, SEEK_SET);\n        }\n        hashVal = hashRes.hash;\n    }\n    return hashVal;\n}\n\n\nCXXHash &HashControl::getXXHash()\n{\n    return m_hash;\n}\n\n"
  },
  {
    "path": "src/common/hashcontrol.h",
    "content": "#pragma once\n\n#include \"nullable_value.h\"\n#include \"cxxhash.h\"\n#include \"hashmeta.h\"\n#include \"os.h\"\n\nclass HashControl\n{\npublic:\n\n    HashValue genPartlyHash(int fd, qint64 filesize, const HashMeta& hashMeta,\n                            bool resetOffset=true);\n    CXXHash& getXXHash();\nprivate:\n    CXXHash m_hash;\n};\n\n\n\n"
  },
  {
    "path": "src/common/hashmeta.cpp",
    "content": "#include \"hashmeta.h\"\n\n\nHashMeta::HashMeta(size_type chunks, size_type maxCountOfR)\n    : chunkSize(chunks),\n      maxCountOfReads(maxCountOfR)\n{}\n\nbool HashMeta::isNull() const\n{\n    return chunkSize == 0 && maxCountOfReads == 0;\n}\n\nbool HashMeta::operator==(const HashMeta &rhs) const\n{\n    return chunkSize == rhs.chunkSize &&\n           maxCountOfReads == rhs.maxCountOfReads;\n}\n"
  },
  {
    "path": "src/common/hashmeta.h",
    "content": "#pragma once\n\n#include <qglobal.h>\n#include <cstddef>\n\n#include \"database/db_globals.h\"\n\nstruct HashMeta\n{\n    typedef int size_type;\n\n    HashMeta() = default;\n    HashMeta(size_type chunks, size_type maxCountOfR);\n    size_type chunkSize {};\n    size_type maxCountOfReads {};\n    qint64 idInDb {db::INVALID_INT_ID} ;\n\n    bool isNull() const;\n\n    bool operator==(const HashMeta& rhs) const;\n};\n\n"
  },
  {
    "path": "src/common/idmapentry.h",
    "content": "#pragma once\n\n#include <sys/types.h>\n#include <string>\n\ntemplate <typename T>\nclass S_IdMapEntry\n{\npublic:\n    T idInNs;\n    T idOutOfNs;\n    T count;\n\n    S_IdMapEntry(T inNs_, T idOutOfNs_,\n                 T count_=1) :\n        idInNs(inNs_),\n        idOutOfNs(idOutOfNs_),\n        count(count_){}\n\n    S_IdMapEntry(T idInBoth) :\n        S_IdMapEntry(idInBoth, idInBoth){}\n\n    /*\n    S_IdMapEntry(const S_IdMapEntry& other) :\n        idInNs(other.idInNs),\n        idOutOfNs(other.idOutOfNs),\n        count(other.count) {} */\n\n    /// @return the string in the form the gid or uid map expects:\n    /// 0 1000 1 (including trailing newline)\n    std::string to_string() const\n    {\n        return std::to_string(idInNs) + \" \"\n                + std::to_string(idOutOfNs) + \" \"\n                + std::to_string(count) + '\\n';\n    }\n};\n\n"
  },
  {
    "path": "src/common/interrupt_handler.cpp",
    "content": "#include <cassert>\n#include <unordered_map>\n\n#include \"logger.h\"\n#include \"interrupt_handler.h\"\n#include \"os.h\"\n#include \"exccommon.h\"\n\n\nstatic thread_local bool g_withinInterProtect = false;\nstatic thread_local bool g_signalOccurred = false;\nstatic thread_local std::vector<bool> g_occurred_sigs{};\n// map of signal and index into the g_occurred_sigs vector\nstatic thread_local std::unordered_map<int, int> g_sig_indeces{};\n\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nvoid ip_dummySighandler(int signum){\n    auto it = g_sig_indeces.find(signum);\n    if(it == g_sig_indeces.end()){\n        const char msg[] = \"ip_dummySighandler: error: failed to find signal...\\n\";\n        os::write(2, msg, sizeof(msg)-1);\n        return;\n    }\n    g_occurred_sigs[it->second] = true;\n    g_signalOccurred = true;\n}\n\n#ifdef __cplusplus\n}\n#endif\n\n\nInterruptProtect::InterruptProtect()\n{}\n\nInterruptProtect::InterruptProtect(int signum) :\n    InterruptProtect(std::vector<int>{signum})\n{}\n\nInterruptProtect::InterruptProtect(const std::vector<int> &sigs){\n    this->enable(sigs);\n}\n\nbool InterruptProtect::signalOccurred()\n{\n    return g_signalOccurred;\n}\n\n\nInterruptProtect::~InterruptProtect()\n{\n    if(! 
g_withinInterProtect){\n        return;\n    }\n    try {\n        this->disable();\n    }  catch (const std::exception& e) {\n        logCritical << __func__ << e.what();\n    }\n}\n\nvoid InterruptProtect::enable(const std::vector<int> &sigs)\n{\n    if(g_withinInterProtect){\n        throw QExcProgramming(QString(__func__) + \": only one instance allowed per thread\");\n    }\n    g_withinInterProtect = true;\n    g_signalOccurred = false;\n    m_sigs = sigs;\n    m_oldActions.resize(sigs.size());\n\n    g_occurred_sigs.resize(sigs.size(), false);\n\n    struct sigaction act{};\n    act.sa_handler = ip_dummySighandler;\n    sigemptyset (&act.sa_mask);\n    act.sa_flags = SA_RESTART;\n    for(int idx=0; idx < int(sigs.size()); idx++){\n        auto s = sigs[idx];\n        g_sig_indeces[s] = idx;\n        os::sigaction(s, &act, &m_oldActions[idx]);\n    }\n}\n\nvoid InterruptProtect::disable()\n{\n    if(! g_withinInterProtect){\n        throw QExcProgramming(QString(__func__) + \": not enabled\");\n    }\n    // restore previous handlers\n    for(int idx=0; idx < int(m_sigs.size()); idx++){\n        try {\n            os::sigaction(m_sigs[idx], &m_oldActions[idx], nullptr);\n        }  catch (const os::ExcOs& e) {\n            logCritical << e.what();\n        }\n    }\n    g_withinInterProtect = false;\n    if(! g_signalOccurred){\n        return;\n    }\n    for (auto& it: g_sig_indeces) {\n        auto sig = it.first;\n        auto idx = it.second;\n        if(g_occurred_sigs.at(idx)){\n            logDebug << \"sending sig\" << sig;\n            kill(getpid(), sig);\n        }\n    }\n}\n"
  },
  {
    "path": "src/common/interrupt_handler.h",
    "content": "#pragma once\n\n#include <csignal>\n#include <qglobal.h>\n#include <vector>\n\n#include \"util.h\"\n\n/// Defer processing of signals\n/// until destruction. Automatically restart (some)\n/// system-calls during that time (SA_RESTART).\n/// Only one instance allowed at a time per thread!\nclass InterruptProtect\n{\npublic:    \n    InterruptProtect();\n    InterruptProtect(int signum);\n    InterruptProtect(const std::vector<int> &sigs);\n    ~InterruptProtect();\n\n    void enable(const std::vector<int> &sigs);\n    void disable();\n    bool signalOccurred();\n\n\npublic:\n    Q_DISABLE_COPY(InterruptProtect)\n    DEFAULT_MOVE(InterruptProtect)\nprivate:\n    std::vector<int> m_sigs{};\n    std::vector<struct sigaction> m_oldActions{};\n};\n\n"
  },
  {
    "path": "src/common/limited_priority_queue.h",
    "content": "#pragma once\n\n#include <cstddef>\n#include <limits>\n#include <queue>\n\n\ntemplate<typename T, typename container, typename compare>\nclass limited_priority_queue : public std::priority_queue<T, container, compare>\n{\npublic:\n    typedef typename container::value_type value_type;\n\npublic:\n\n    void\n    push(const T& val)\n    {\n        std::priority_queue<T, container, compare>::push(val);\n        if(static_cast<size_t>(this->size()) > m_maxSize){\n            this->pop();\n        }\n    }\n\n    void\n    setMaxSize(const size_t &maxSize){\n        m_maxSize = maxSize;\n    }\n\n    template<typename PopContainerT>\n    PopContainerT\n    popAll(bool reverse=false){\n        PopContainerT ret(this->size());\n        if(reverse){\n            for(auto it = ret.rbegin(); it != ret.rend(); ++it ){\n                *it = this->top();\n                this->pop();\n            }\n        } else {\n            for(auto & el : ret){\n                el = this->top();\n                this->pop();\n            }\n        }\n        return ret;\n    }\n\nprotected:\n    size_t m_maxSize{std::numeric_limits<size_t>::max()};\n};\n\n"
  },
  {
    "path": "src/common/logger.cpp",
    "content": "#include <cassert>\n#include <QDateTime>\n#include <QtGlobal>\n#include <QStandardPaths>\n#include <QTextStream>\n#include <QFileInfo>\n#include <QDir>\n#include <utility>\n\n#include \"logger.h\"\n#include \"qoutstream.h\"\n#include \"app.h\"\n#include \"exccommon.h\"\n#include \"os.h\"\n#include \"osutil.h\"\n#include \"staticinitializer.h\"\n#include \"cflock.h\"\n\n\n\nnamespace  {\nQString g_logPreamble;\nconst QtMsgType DEFAULT_VERBOSITY = QtMsgType::QtWarningMsg;\nQtMsgType g_verbosityLvl = DEFAULT_VERBOSITY;\nint g_verbosityLvlOrdinal=logger::msgTypeToOrdinal(DEFAULT_VERBOSITY);\n\n\n\nvoid messageHandler(QtMsgType msgType, const QMessageLogContext &context, const QString &msg)\n{\n    int typeOrdinal = logger::msgTypeToOrdinal(msgType);\n\n#ifndef NDEBUG\n    if (msgType == QtDebugMsg) {\n        if(typeOrdinal >= g_verbosityLvlOrdinal){\n            QErr() << g_logPreamble << \" Dbg: \"\n                 << \"(\" << QFileInfo(context.file).fileName() <<\":\" << context.line << \") \"\n                 << \" pid \" << getpid() << \": \"  << msg << '\\n' ;\n        }\n        // Don't log debug messages to file\n        return;\n    }\n#else\n    Q_UNUSED(context)\n#endif\n\n    const QString dateTime = QDateTime::currentDateTime().toString(\n                \"yyyy-MM-dd HH:mm:ss\");\n    QString msgTypeStr = logger::msgTypeToStr(msgType);\n\n    if(typeOrdinal >= g_verbosityLvlOrdinal){\n        QErr() << g_logPreamble << \" \"<<dateTime<<' '<< msgTypeStr<<\": \"<<msg<< \"\\n\";\n    }\n    if(logger::getLogRotate().file().isOpen()){\n        logger::getLogRotate().stream() <<dateTime<<' '<< msgTypeStr\n                                       << \" pid \" << getpid() << \": \" <<msg << \"\\n\";\n    }\n}\n\n} // namespace\n\n\n\n\n/// @param preamble: printed before every log message\nvoid logger::setup(const QString& preamble)\n{\n    g_logPreamble = preamble;\n    qInstallMessageHandler(messageHandler);\n\n}\n\n\nvoid 
logger::enableLogToFile(const QString& filename)\n{\n    QDir d(logDir());\n    if( ! d.mkpath(logDir())){\n        throw QExcCommon(QString(\"Failed to create %1\").arg(logDir()));\n    }\n    const QString path = logDir() + \"/log_\" + filename;\n    getLogRotate().setFullpath(path);\n    getLogRotate().setup();\n}\n\n\n\nvoid logger::disableLogToFile()\n{\n    getLogRotate().cleanup();\n}\n\n\nvoid logger::setVerbosityLevel(QtMsgType lvl)\n{\n    g_verbosityLvl = lvl;\n    g_verbosityLvlOrdinal = msgTypeToOrdinal(lvl);\n}\n\nvoid logger::setVerbosityLevel(const char *str)\n{\n    setVerbosityLevel(strToMsgType(str));\n}\n\n\nQtMsgType logger::getVerbosityLevel()\n{\n    return g_verbosityLvl;\n}\n\n\n\nlogger::LogRotate &logger::getLogRotate()\n{\n    static logger::LogRotate logRotate;\n    return logRotate;\n}\n\n\n\n/// call enableLogToFile first\nconst QString& logger::logDir()\n{\n    static const QString logDir = QStandardPaths::writableLocation(QStandardPaths::CacheLocation);\n    return logDir;\n}\n\n\nconst char *logger::msgTypeToStr(QtMsgType msgType)\n{\n    switch (msgType) {\n    case QtDebugMsg: return \"dbg\";\n#if QT_VERSION >= QT_VERSION_CHECK(5, 5, 0)\n    case QtInfoMsg     : return \"info\";\n#endif\n    case QtWarningMsg  : return \"warning\";\n    case QtCriticalMsg : return \"critical\";\n    case QtFatalMsg    : return \"fatal\";\n    }\n    static StaticInitializer initOnFirstCall( [&msgType](){\n        logWarning << \"msgTypeToStr\" << \"unknown messagetype\" << msgType;\n    });\n    return \"warning\";\n}\n\n\n\n\n/// Unfortunately qt messagetype are not really in a meaningful order - do it ourselves.\nint logger::msgTypeToOrdinal(QtMsgType msgType)\n{\n    switch (msgType) {\n    case QtDebugMsg: return 0;\n#if QT_VERSION >= QT_VERSION_CHECK(5, 5, 0)\n    case QtInfoMsg     : return 1;\n#endif\n    case QtWarningMsg  : return 2;\n    case QtCriticalMsg : return 3;\n    case QtFatalMsg    : return 4;\n    }\n    static 
StaticInitializer initOnFirstCall( [&msgType](){\n        logWarning << \"msgTypeToOrdinal\" << \"unknown messagetype\" << msgType;\n    });\n    return 2;\n}\n\n\n\n/// str is epected to be valid!\nQtMsgType logger::strToMsgType(const char *str)\n{\n    switch (str[0]) {\n    case 'd': return QtMsgType::QtDebugMsg;\n    case 'i':\n#if QT_VERSION >= QT_VERSION_CHECK(5, 5, 0)\n         return QtMsgType::QtInfoMsg;\n#else\n         return QtMsgType::QtWarningMsg;\n#endif\n    case 'w': return QtMsgType::QtWarningMsg;\n    case 'c': return QtMsgType::QtCriticalMsg;\n    case 'f': return QtMsgType::QtFatalMsg;\n    default: break;\n    }\n    static StaticInitializer initOnFirstCall( [&str](){\n        logWarning << \"strToMsgType\" << \"unknown messagetype\" << str\n                   << \"using default\";\n    });\n    return DEFAULT_VERBOSITY;\n}\n\n\n\n\nlogger::LogRotate::LogRotate(QString  fullpath)\n    : m_fullpath(std::move(fullpath))\n{\n}\n\nQTextStream &logger::LogRotate::stream()\n{\n    return m_stream;\n}\n\n\nconst QFile& logger::LogRotate::file() const\n{\n    return m_file;\n}\n\n\nvoid logger::LogRotate::openLogfileOrThrow()\n{\n    if(! m_file.open(QFile::OpenModeFlag::Append | QIODevice::Text)){\n        throw QExcIo(qtr(\"Failed to open logile at %1 - %2\").arg(m_fullpath,\n                     m_file.errorString()));\n    }\n}\n\n/// Open the log file in append mode, rotate logfiles race-free, if too big.\n/// @throws ExcOs, QExcIo\nvoid logger::LogRotate::setup()\n{\n    assert(! m_fullpath.isEmpty());\n    m_file.setFileName(m_fullpath);\n\n    openLogfileOrThrow();\n\n    if(os::fstat(m_file.handle()).st_size > 50000){\n        // race condition - make sure to only rename once:\n         CFlock l(m_file.handle());\n         l.lockExclusive();\n         // renamed already (by another process)?\n         if(! 
osutil::findPathOfFd<QByteArray>(m_file.handle()).endsWith(\"_old\")){\n             const std::string path = m_fullpath.toStdString();\n             os::rename(path, path + \"_old\");\n         }\n         l.unlock();\n         m_file.close();\n         // open or create the new logfile:\n         openLogfileOrThrow();\n    }\n    m_stream.setDevice(&m_file);\n}\n\nvoid logger::LogRotate::cleanup()\n{\n    m_file.close();\n}\n\n\nvoid logger::LogRotate::setFullpath(const QString &p)\n{\n    m_fullpath = p;\n}\n\n\n\n\n\n\n\n\n"
  },
  {
    "path": "src/common/logger.h",
    "content": "#pragma once\n\n#include <QtGlobal>\n#include <QDebug>\n#include <QFile>\n#include <QTextStream>\n\n#define logDebug qDebug()\n\n#if QT_VERSION < QT_VERSION_CHECK(5, 4, 0)\n// maybe_todo: do something else about the quotes - or suggest\n// user to upgrade their qt-version...\n#define logInfo qWarning()  // no info yet...\n#define logWarning qWarning()\n#define logCritical qCritical()\n#elif QT_VERSION < QT_VERSION_CHECK(5, 5, 0)\n#define logInfo qWarning().noquote()  // no info yet...\n#else\n#define logInfo qInfo().noquote()\n#define logWarning qWarning().noquote()\n#define logCritical qCritical().noquote()\n#endif\n\n\nnamespace logger {\n\n\nclass LogRotate{\npublic:\n    LogRotate(QString fullpath=QString());\n    void setup();\n    void cleanup();\n    void setFullpath(const QString& p);\n\n    QTextStream& stream();\n\n\n    const QFile& file() const;\n\n\n\nprivate:\n    void openLogfileOrThrow();\n\n    QString m_fullpath;\n    QFile m_file;\n    QTextStream m_stream;\n\n};\n\n\nconst char* msgTypeToStr(QtMsgType msgType);\nint msgTypeToOrdinal(QtMsgType msgType);\nQtMsgType strToMsgType(const char* str);\n\n\nvoid setup(const QString &preamble);\nvoid enableLogToFile(const QString &filename);\nvoid disableLogToFile();\nvoid setVerbosityLevel(QtMsgType lvl);\nvoid setVerbosityLevel(const char* str);\nQtMsgType getVerbosityLevel();\n\n\nLogRotate& getLogRotate();\n\nconst QString &logDir();\n\n\n}\n\n\n\n\n"
  },
  {
    "path": "src/common/oscpp/CMakeLists.txt",
    "content": "\n\nadd_library(oscpp_lib\n  cflock.cpp\n  excos.cpp\n  fdentries.cpp\n  os.cpp\n  osutil.cpp\n  oscaps.cpp\n    )\n\ntarget_link_libraries(oscpp_lib PUBLIC\n    Qt5::Core\n    ${CMAKE_DL_LIBS}\n    lib_util\n    cap\n)\n\n\n"
  },
  {
    "path": "src/common/oscpp/cflock.cpp",
    "content": "#include <sys/file.h>\n#include <iostream>\n\n#include \"cflock.h\"\n#include \"os.h\"\n#include \"osutil.h\"\n\n#ifndef NDEBUG\n\nstatic bool checkFdFlockFlags(int fd, int operation){\n    int flags = os::getFdStatusFlags(fd);\n\n    switch (operation) {\n    case LOCK_EX:\n        if(flags & O_WRONLY || flags & O_RDWR) {\n            return true;\n        }\n        std::cerr << \"LOCK_EX: fd opened RDONLY\\n\";\n        break;\n    case LOCK_SH:\n        if(!(flags & O_WRONLY)) {\n            return true;\n        }\n        std::cerr << \"LOCK_SH: fd opened WRONLY\\n\";\n        break;\n    default:\n        std::cerr << \"Bad fd operation \" << operation << \"\\n\";\n        break;\n    }\n    return false;\n}\n\n#endif\n\n/// In order to catch further possible NFS idiosyncrasies (bugs?), better never lock\n/// blocking. See also e.g. shournal's commit 1918f88.\nstatic void doLockNB(int fd, int operation){\n    for(int i=0; ; i++){\n        try {\n            os::flock(fd, operation | LOCK_NB);\n            return;\n        } catch (const os::ExcOs& ex) {\n            if(ex.errorNumber() != EWOULDBLOCK){\n                throw;\n            }\n            if(i>9){\n                std::cerr << \"doLockNB: gave up waiting for lock\\n\";\n                throw;\n            }\n            osutil::randomSleep(1 *1000, 3 *1000);\n        }\n    }\n}\n\nCFlock::CFlock(int fd) :\n    m_fd(fd)\n{}\n\nCFlock::~CFlock()\n{\n    if(m_isLockedSH || m_isLockedEX){\n        try {\n            unlock();\n        } catch (const os::ExcOs& e) {\n            // should never happen\n            std::cerr << e.what() << \"\\n\";\n        }\n    }\n}\n\nvoid CFlock::lockExclusive()\n{\n    assert(checkFdFlockFlags(m_fd, LOCK_EX));\n    if(m_isLockedSH){\n        throw QExcProgramming(\"Due to NFS issues, upgrading shared to exclusive \"\n                              \"locks is not supported. 
Please unlock() first.\");\n    }\n    doLockNB(m_fd, LOCK_EX);\n    m_isLockedSH = false;\n    m_isLockedEX = true;\n}\n\nvoid CFlock::lockShared()\n{\n    assert(checkFdFlockFlags(m_fd, LOCK_SH));\n    doLockNB(m_fd, LOCK_SH);\n    m_isLockedEX = false;\n    m_isLockedSH = true;\n}\n\nvoid CFlock::unlock()\n{\n    os::flock(m_fd, LOCK_UN);\n    m_isLockedEX = false;\n    m_isLockedSH = false;\n}\n"
  },
  {
    "path": "src/common/oscpp/cflock.h",
    "content": "#pragma once\n\n\n/// Wrapper class for flock.\n/// Due to NFS emulating flock as fcntl(2) byte-range locks\n/// the fd open mode must match the locking operations:\n/// In order to place a shared lock, fd must be open for reading,\n/// In order to place an exclusive lock, fd must be open for writing. To\n/// place both types of lock, open a file read-write.\nclass CFlock\n{\npublic:\n    /// fd should in general be opened read-write (see above)!\n    CFlock(int fd);\n    ~CFlock();\n\n    void lockExclusive();\n    void lockShared();\n    void unlock();\n\n\n\npublic:\n    CFlock(const CFlock &) = delete ;\n    void operator=(const CFlock &) = delete ;\n\nprivate:\n    int m_fd;\n    bool m_isLockedSH{false};\n    bool m_isLockedEX{false};\n};\n\n"
  },
  {
    "path": "src/common/oscpp/excos.cpp",
    "content": "\n#include <unistd.h>\n#include <cstring>\n#include <iostream>\n#include <utility>\n\n\n#include \"excos.h\"\n#include \"translation.h\"\n\n\nos::ExcOsCommon::ExcOsCommon(std::string text) :\n    m_descrip(std::move(text))\n{}\n\nconst char *os::ExcOsCommon::what() const noexcept\n{\n    return m_descrip.c_str();\n}\n\nos::ExcOsCommon::ExcOsCommon()\n= default;\n\n\nos::ExcOs::ExcOs(const std::string &preamble) : m_errorNumber(errno)\n  {\n    if(preamble.empty()){\n        m_descrip += \"ExcOs occurred:\";\n    } else {\n        m_descrip += preamble;\n    }\n    m_descrip += \" (\" + std::to_string(errno) +\n            \"): \" + translation::strerror_l(errno);\n#ifndef NDEBUG\n    // maybe_todo: also outside of debug - performance?\n    m_descrip += \"\\n\" + generate_trace_string();\n#endif\n}\n\n/// If errorNumber is zero, no preamble is autogenerated.\nos::ExcOs::ExcOs(const std::string &preamble, int errorNumber) : m_errorNumber(errorNumber)\n  {\n    if(preamble.empty()){\n        m_descrip += \"ExcOs occurred:\";\n    } else {\n        m_descrip += preamble;\n    }\n    if(errorNumber != 0){\n        m_descrip += \" (\" + std::to_string(errorNumber) +\n                \"): \" + translation::strerror_l(errorNumber);\n    }\n\n}\n\n\nint os::ExcOs::errorNumber() const\n{\n    return m_errorNumber;\n}\n\n\n\n/// @param status: depending on typeOfTerm, currently only the signal number (NOT_IMPLEMENTED)\nos::ExcProcessExitNotNormal::ExcProcessExitNotNormal(int status, TypeOfTerm typeOfTerm) :\n    ExcOsCommon (\"Process terminated not normally: \" + std::to_string(status)),\n    m_status(status),\n    m_typeOfTermination(typeOfTerm)\n{}\n\nint os::ExcProcessExitNotNormal::status() const\n{\n    return m_status;\n}\n\nos::ExcProcessExitNotNormal::TypeOfTerm\nos::ExcProcessExitNotNormal::typeOfTermination() const\n{\n    return m_typeOfTermination;\n}\n\n\n"
  },
  {
    "path": "src/common/oscpp/excos.h",
    "content": "#pragma once\n\n#include <string>\n#include <exception>\n\nnamespace os {\n\nclass ExcOsCommon : public std::exception\n{\npublic:\n    ExcOsCommon(std::string  text);\n\n    const char *what () const noexcept override;\nprotected:\n    ExcOsCommon();\n\n    std::string m_descrip;\n};\n\n\n/// Exception with custom preamble which automatically\n/// determines errno and builds an error description string\n/// on what().\nclass ExcOs : public ExcOsCommon\n{\npublic:\n    ExcOs(const std::string & preamble=std::string());\n    ExcOs(const std::string & preamble, int errorNumber);\n    int errorNumber() const;\n\nprotected:\n    int m_errorNumber;\n};\n\nclass ExcTooFewBytesWritten : public ExcOs {\npublic:\n    using ExcOs::ExcOs;\n};\n\n\nclass ExcReadLink : public ExcOs {\npublic:\n    using ExcOs::ExcOs;\n};\n\nclass ExcProcessExitNotNormal : public ExcOsCommon {\npublic:\n    enum TypeOfTerm { SIG, COREDUMP, NOT_IMPLEMENTED };\n\n    ExcProcessExitNotNormal(int status, TypeOfTerm typeOfTerm);\n\n    int status() const;\n\n    TypeOfTerm typeOfTermination() const;\n\nprotected:\n    int m_status;\n    TypeOfTerm m_typeOfTermination;\n};\n\n\n\n} // namespace os\n"
  },
  {
    "path": "src/common/oscpp/fdentries.cpp",
    "content": "\n#include <string>\n#include <iostream>\n\n#include \"fdentries.h\"\n#include \"os.h\"\n\n\nosutil::FdEntries::Iterator::Iterator(DIR *dir, int dirfd):\n    m_iter_dir(dir),\n    m_iter_fd(-1),\n    m_iter_dirfd(dirfd)\n{\n    if(dir != nullptr){\n        this->operator++();\n    } // else we are the end-iterator\n\n}\n\nosutil::FdEntries::Iterator osutil::FdEntries::Iterator::operator++() {\n    struct dirent *ent;\n    while ((ent = ::readdir (m_iter_dir)) != nullptr) {\n        if(ent->d_name[0] == '.'){\n            continue;\n        }\n        int fd = std::stoi(ent->d_name);\n        if(fd == m_iter_dirfd){\n            continue;\n        }\n        m_iter_fd = fd;\n        return *this;\n    }\n    m_iter_fd = -1;\n    return *this;\n}\n\nbool osutil::FdEntries::Iterator::operator!=(const FdEntries::Iterator &other) const {\n    return m_iter_fd != other.m_iter_fd;\n}\n\nint osutil::FdEntries::Iterator::operator*() const {\n    return m_iter_fd;\n}\n\n\nosutil::FdEntries::FdEntries()\n{\n    m_dir =  ::opendir (\"/proc/self/fd\");\n    if (m_dir == nullptr) {\n        throw os::ExcOs(\"opendir failed: /proc/self/fd \");\n    }\n    m_dirLoc = telldir(m_dir);\n    if(m_dirLoc == -1){\n        throw os::ExcOs(\"telldir failed\");\n    }\n}\n\nosutil::FdEntries::~FdEntries()\n{\n    if(closedir (m_dir) == -1){\n        std::cerr << __func__ << \" closedir failed: \" << strerror(errno)\n                  << \"(\" << errno <<\")\\n\";\n    }\n}\n\nosutil::FdEntries::Iterator osutil::FdEntries::begin() const {\n    ::seekdir(m_dir, m_dirLoc);\n    return {m_dir, dirfd(m_dir)};\n}\n\nosutil::FdEntries::Iterator osutil::FdEntries::end() const {\n    return {nullptr, -1};\n}\n\n\n\n\n"
  },
  {
    "path": "src/common/oscpp/fdentries.h",
    "content": "#pragma once\n\n#include <dirent.h>\n\n#include \"util.h\"\n\nnamespace osutil {\n\n\n/// Allow iterating for entries in /proc/self/fd.\n/// The dir-fd internally used is skipped.\nclass FdEntries\n{\npublic:\n    class Iterator {\n        friend class FdEntries;\n    public:\n        Iterator operator++();\n        bool operator!=(const Iterator & other) const;\n        int operator*() const;\n    private:\n        Iterator(DIR * dir, int dirfd);\n\n        DIR* m_iter_dir;\n        int m_iter_fd;\n        int m_iter_dirfd; // fd of *our* DIR stream\n    };\n\npublic:\n    FdEntries();\n    ~FdEntries();\n\n    Iterator begin() const;\n    Iterator end() const;\n\npublic:\n    Q_DISABLE_COPY(FdEntries)\n    DEFAULT_MOVE(FdEntries)\n\nprivate:\n\n    DIR* m_dir;\n    long m_dirLoc;\n};\n\n\n}\n"
  },
  {
    "path": "src/common/oscpp/os.cpp",
    "content": "\n\n\n#include <sys/mount.h>\n#include <sys/utsname.h>\n#include <pwd.h>\n#include <fcntl.h>\n#include <climits>\n#include <dirent.h>\n#include <cstring>\n#include <sys/wait.h>\n#include <sys/socket.h>\n#include <sys/file.h>\n#include <sys/sendfile.h>\n\n#include <cstdio>\n#include <cassert>\n\n#include <algorithm>\n#include <sstream>\n#include <iostream>\n#include <iterator>\n#include <dlfcn.h>\n\n#include \"os.h\"\n#include \"excos.h\"\n#include \"cleanupresource.h\"\n#include \"osutil.h\"\n\nconst int os::OPEN_WRONLY = O_WRONLY;\nconst int os::OPEN_RDONLY = O_RDONLY;\nconst int os::OPEN_RDWR = O_RDWR;\nconst int os::OPEN_CLOEXEC = O_CLOEXEC;\nconst int os::OPEN_NONBLOCK = O_NONBLOCK;\nconst int os::OPEN_CREAT = O_CREAT;\nconst int os::OPEN_EXCL = O_EXCL;\nconst int os::OPEN_TRUNC = O_TRUNC;\n\n\n\nstatic bool& retryOnInterrupt(){\n    thread_local static bool retryIt = false;\n    return retryIt;\n}\n\n\nvoid os::setRetryOnInterrupt(bool val)\n{\n    bool & valRef = retryOnInterrupt();\n    valRef = val;\n}\n\n/// @throws ExcOs\nos::stat_t os::fstat(int fd)\n{\n    struct stat stat_;\n    if(::fstat(fd, &stat_) == -1){\n        throw ExcOs(\"fstat \" + std::to_string(fd) + \" failed\");\n    }\n    return stat_;\n}\n\n/// @throws ExcOs\nos::stat_t os::stat(const char *filename)\n{\n    os::stat_t st;\n    if(::stat(filename, &st) == -1){\n        throw ExcOs(\"stat \" + std::string(filename) + \" failed\");\n    }\n    return st;\n}\n\n/// @throws ExcOs\nvoid os::getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid)\n{\n\n    if(::getresgid(rgid, egid, sgid) == -1) {\n         throw ExcOs(\"getresgid failed\");\n    }\n\n}\n\n/// @throws ExcOs\nvoid os::getresuid(uid_t *ruid, uid_t *euid, uid_t *suid)\n{\n    if(::getresuid(ruid, euid, suid) == -1) {\n         throw ExcOs(\"getresuid failed\");\n    }\n}\n\n/// @throws ExcOs\nvoid os::setgid(gid_t gid)\n{\n    if(::setgid(gid) == -1){\n        throw ExcOs(\"setgid failed\");\n    }\n\n}\n\n/// @throws 
ExcOs\nvoid os::setuid(uid_t uid)\n{\n    if(::setuid(uid) == -1){\n        throw ExcOs(\"setuid failed\");\n    }\n}\n\nstd::string os::getHomeDir()\n{\n    char *homedir = getenv(\"HOME\");\n    // fallback\n    if (homedir == nullptr) {\n        homedir = ::getpwuid(getuid())->pw_dir;\n    }\n    return homedir;\n}\n\n\nstd::string os::getCacheDir()\n{\n    char *cacheDir = getenv(\"XDG_CACHE_HOME\");\n    std::string cacheDirStr;\n    // fallback\n    if (cacheDir == nullptr) {\n        cacheDirStr = os::getHomeDir() + \"/.cache\";\n    } else {\n        cacheDirStr = cacheDir;\n    }\n    return cacheDirStr;\n}\n\n\n/// @throws ExcOs\nvoid os::close(int fd)\n{\n    if(::close(fd) == -1){\n        throw ExcOs(\"close failed for fd \" + std::to_string(fd));\n    }\n}\n\n\n/// @throws ExcOs\nint os::open(const char*  filename, int flags, bool clo_exec, mode_t mode){\n    if(clo_exec){\n        flags |= O_CLOEXEC;\n    }\n    // quoting the man:\n    // the mode argument must be supplied when\n    // O_CREAT or O_TMPFILE is specified in flags; if neither O_CREAT\n    // nor O_TMPFILE is specified, then mode is ignored.\n    // So we always pass the mode.\n    int fd = ::open(filename, flags, mode);\n    if(fd == -1) {\n        throw ExcOs(\"open \" + std::string(filename) + \" failed\");\n    }\n    return fd;\n}\n\n\n\n/// @param throwIfLessBytesWritten: if true, throw if the number of written bytes\n/// is less than requested (in param n)\n/// @throws ExcOs, ExcTooFewBytesWritten\nssize_t os::write(int fd, const void *buf, size_t n, bool throwIfLessBytesWritten)\n{\n    auto ret = ::write(fd, buf, n);\n    if(ret == -1){\n        throw ExcOs(\"write failed\");\n    }\n    if(throwIfLessBytesWritten && ret < static_cast<ssize_t>(n)){\n        throw ExcTooFewBytesWritten(\"Too few bytes written for file \" +\n                                    osutil::findPathOfFd<std::string>(fd), 0);\n    }\n    return ret;\n}\n\n/// @overload\n/// @throws ExcOs\nssize_t 
os::write(int fd, const std::string &buf, bool throwIfLessBytesWritten)\n{\n    return os::write(fd, buf.c_str(), buf.size(),\n                     throwIfLessBytesWritten);\n}\n\n/// @overload\n/// @throws ExcOs\nssize_t os::write(int fd, const QByteArray &buf, bool throwIfLessBytesWritten)\n{\n    return os::write(fd, buf.data(), static_cast<size_t>(buf.size()),\n                     throwIfLessBytesWritten);\n}\n\n\n\n/// Return a pipe-array, where idx 0 holds read- idx 1\n/// holds the write end\n/// @throws ExcOs\nos::Pipes_t os::pipe(int flags, bool clo_exec)\n{\n    if(clo_exec){\n        flags |= O_CLOEXEC;\n    }\n\n    Pipes_t fds;\n    if(::pipe2(fds.data(), flags) == -1){\n        throw ExcOs(\"pipe failed, used flags: \" + std::to_string(flags));\n    }\n    return fds;\n}\n\n/// @throws ExcOs\npid_t os::fork()\n{\n    auto pid = ::fork();\n    if(pid == -1){\n        throw ExcOs(\"fork failed\");\n    }\n    return pid;\n}\n\n\n/// @throws ExcOs\nvoid os::unlinkat(int dirfd, const char *pathname, int flags)\n{\n    if(::unlinkat(dirfd, pathname, flags) == -1){\n        throw ExcOs(std::string(\"unlinkat failed for \") + pathname);\n    }\n}\n\n/// @throws ExcOs\nvoid os::umount(const std::string &specialFile)\n{\n    int res = ::umount(specialFile.c_str());\n    if(res == -1){\n        throw ExcOs(\"umount failed\");\n    }\n}\n\n\n/// @throws ExcOs\nvoid os::setpriority(int which, id_t who, int prio)\n{\n    if(::setpriority(which, who, prio) == -1){\n         throw ExcOs(\"setpriority failed\");\n    }\n}\n\n\n/// @throws ExcOs\nvoid os::setegid(gid_t gid)\n{\n    if(::setegid(gid) == -1){\n        throw ExcOs(\"setegid failed\");\n    }\n}\n\n/// @throws ExcOs\nvoid os::seteuid(uid_t uid)\n{\n    if(::seteuid(uid) == -1){\n        throw ExcOs(\"seteuid failed\");\n    }\n\n}\n\nuid_t os::getuid()\n{\n    return ::getuid();\n}\n\ngid_t os::getgid()\n{\n    return ::getgid();\n}\n\n/// @throws ExcOs\nuid_t os::getsuid()\n{\n   uid_t ruid, euid, 
suid;\n   os::getresuid(&ruid, &euid, &suid);\n   return suid;\n}\n\n/// @throws ExcOs\ngid_t os::getsgid()\n{\n    gid_t rgid, egid, sgid;\n    os::getresgid(&rgid, &egid, &sgid);\n    return sgid;\n}\n\nvoid os::mkfifo(const char *pathname, mode_t mode){\n    if(::mkfifo(pathname, mode) == -1){\n        throw os::ExcOs(std::string(\"mkfifo \") + pathname + \" failed\");\n    }\n}\n\n/// aquivalent of 'mkdir -p' to create necessary\n/// parts of a path recursively\nvoid os::mkpath(std::string s, mode_t mode)\n{\n    size_t pre=0, pos;\n    std::string dir;\n    int mdret;\n    if(s[s.size()-1]!='/'){\n        s+='/';\n    }\n\n    while((pos=s.find_first_of('/',pre))!=std::string::npos){\n        dir=s.substr(0,pos++);\n        pre=pos;\n        if(dir.empty()){\n            continue;\n        }\n        if((mdret=mkdir(dir.c_str(),mode)) && errno!=EEXIST){\n            throw ExcOs(\"mkpath failed\");\n        }\n    }\n}\n\n\n\nint __os::openat(int dirfd, const char* filename, int flags, bool clo_exec, mode_t mode)\n{\n    if(clo_exec){\n        flags |= O_CLOEXEC;\n    }\n    int fd = ::openat(dirfd, filename, flags, mode);\n    if(fd == -1){\n        throw os::ExcOs(\"openat \" + std::string(filename) + \" failed\");\n    }\n    return fd;\n}\n\n\n\n/// Be careful, according to man, \"It is unspecified whether the effective group\n/// ID of the calling process is included in the returned list.\"\n/// @throws ExcOs\nos::Groups os::getgroups()\n{\n    os::Groups groups;\n    int ngroups = ::getgroups(0, groups.data());\n    if(ngroups == -1){\n        throw ExcOs(\"getgroups\");\n    }\n    groups.resize(static_cast<os::Groups::size_type>(ngroups));\n    ngroups = ::getgroups(static_cast<int>(groups.size()), groups.data());\n    if(ngroups == -1){\n        throw ExcOs(\"getgroups\");\n    }\n    return groups;\n}\n\nuid_t os::geteuid()\n{\n    return ::geteuid();\n}\n\ngid_t os::getegid()\n{\n    return ::getegid();\n}\n\n\n/// returns a list of all group-ids on 
the system (/etc/group)\n/// @throws ExcOs\nstd::vector<gid_t> os::queryGroupIds()\n{    \n    std::vector<gid_t> ids;\n    group* grp;\n    ::setgrent();\n\n    errno = 0;\n    while((grp = getgrent()) != nullptr){\n        ids.push_back(grp->gr_gid);\n    }\n    if(errno != 0){\n        throw ExcOs(\"getgrent\");\n    }\n    endgrent();\n    return ids;\n}\n\n\n\n\n/// @throws ExcKernelVersionParse, ExcOs\nos::KernelVersion os::getKernelVersion()\n{\n    utsname uname_;\n    if(uname(&uname_) == -1){\n         throw ExcOs(\"uname\");\n    }\n    KernelVersion version;\n    os::KernelVersion::size_type verIdx=0;\n    std::string release = uname_.release;\n    std::string currentNumber;\n    for(const char c : release){\n        if(std::isdigit(c)){\n            currentNumber += c;\n        } else{\n            version[static_cast<KernelVersion::size_type>(verIdx++)] = std::stoi(currentNumber);\n            currentNumber.clear();\n            if(verIdx == version.size()){\n                break;\n            }\n        }\n    }\n    if(verIdx != version.size()){\n        throw ExcKernelVersionParse();\n    }\n    return version;\n}\n\nint os::unshare(int flags)\n{\n    int ret = ::unshare(flags);\n    if(ret == -1){\n        throw ExcOs(\"unshare failed\");\n    }\n    return ret;\n}\n\n/// returns the names of the directory contents\n/// @throws ExcOs\nstd::vector<std::string>  os::ls(const std::string &dirname_, os::DirFilter filter)\n{\n    DIR *dir;\n    struct dirent *ent;\n    if ((dir = ::opendir (dirname_.c_str())) == nullptr) {\n        throw ExcOs(\"opendir failed: \" + dirname_);\n    }\n    auto closeLater = finally([&dir] {  closedir (dir);  });\n\n    std::vector<std::string> files;\n    while ((ent = ::readdir (dir)) != nullptr) {\n        if((filter & DirFilter::NoDot && strcmp(ent->d_name, \".\" ) == 0) ||\n           (filter & DirFilter::NoDotDot && strcmp(ent->d_name, \"..\" ) == 0)){\n            continue;\n        }\n        
files.emplace_back(ent->d_name);\n    }\n\n    return files;\n}\n\n\n\n\npid_t os::getpid()\n{\n    return ::getpid();\n}\n\n/// @param cleanStatusOnSuccess:: If the child terminated normally, clean status, so\n/// it only contains the 8 least significant bits (WEXITSTATUS).\n/// @throws ExcOs, ExcProcessExitNotNormal\npid_t os::waitpid(pid_t pid, int *status, int options, bool cleanStatusOnSuccess)\n{\n    int internalStatus = 1;\n    if(status == nullptr){\n        status = &internalStatus;\n    }\n    pid_t ret = ::waitpid (pid, status, options) ;\n    if(ret == -1){\n        throw ExcOs(\"waitpid failed for pid \" + std::to_string(pid) );\n    }\n    if (! WIFEXITED (*status)){\n        // process did not call exit and did not return from main normally\n        // find out what happended\n        int extractedStatus=-1;\n        ExcProcessExitNotNormal::TypeOfTerm typeOfTerm = ExcProcessExitNotNormal::NOT_IMPLEMENTED;\n        if(WIFSIGNALED(*status)){\n            extractedStatus = WTERMSIG(*status);\n            if(WCOREDUMP(*status)){\n                typeOfTerm = ExcProcessExitNotNormal::COREDUMP;\n            } else {\n                typeOfTerm = ExcProcessExitNotNormal::SIG;\n            }\n        }\n        // There are some other cases, which could be checked:\n        // WIFSTOPPED/WIFCONTINUED\n\n        throw ExcProcessExitNotNormal(extractedStatus, typeOfTerm);\n    }\n    if(cleanStatusOnSuccess){\n        *status = WEXITSTATUS(*status);\n    }\n\n    return ret;\n}\n\n/// @throws ExcOs\nvoid os::exec (const char *filename, char * const argv[], char * const envp[])\n{\n    if(envp == nullptr){\n        envp = environ;\n    }\n    ::execvpe(filename, argv,  envp);\n    // only get here on error\n    throw ExcOs(\"executing \" + std::string(filename) + \" failed\" );\n\n}\n\n/// @throws ExcOs\nvoid os::exec(const std::vector<std::string> &args, char * const envp[])\n{\n    if (args.empty()) {\n        throw std::invalid_argument(\n                  
  \"exec called with empty args\");\n    }\n\n    std::vector<char*> pointerVec(args.size() + 1 ); // + 1 because of terminating NULL\n    for(unsigned i = 0; i < args.size() ; ++i) {\n        pointerVec[i] = const_cast<char*>(args[i].c_str());\n    }\n    pointerVec.back() = nullptr;\n    char** result = pointerVec.data();\n    os::exec(result[0], result, envp);\n}\n\n\n\n\n/// @throws ExcOs\nvoid os::setgroups(const os::Groups &groups)\n{\n    if(::setgroups(groups.size(), groups.data()) == -1){\n        std::stringstream result;\n        std::copy(groups.begin(), groups.end(), std::ostream_iterator<int>(result, \" \"));\n        throw ExcOs(\"setgroups failed. Used groups: \" + result.str() );\n    }\n}\n\n/// @throws ExcOs\noff_t os::lseek (int fd, off_t offset, int whence)\n{\n    off_t ret = ::lseek(fd, offset, whence);\n    if(ret == -1){\n        throw os::ExcOs(\"lseek failed. fd: \" + std::to_string(fd)\n                    + \" offset: \" + std::to_string(offset));\n    }\n    return ret;\n}\n\n/// Like ftell for a file descriptor\n/// @throws ExcOs\noff_t os::ltell(int fd)\n{\n    return os::lseek(fd, 0, SEEK_CUR);\n\n}\n\nvoid os::mount(const char *source, const char *target, const char *fstype, unsigned long rwflag, const void *data)\n{\n    if(::mount(source, target, fstype, rwflag, data) == -1){        \n        throw ExcOs(\"Mount from \" + strFromCString(source) + \" to \"\n                    + strFromCString(target) + \" failed\");\n    }\n}\n\n\n/// @throws ExcOs\nvoid os::mount(const std::string &source, const std::string& target, const char *fstype,\n               unsigned long rwflag, const void *data)\n{\n    os::mount(source.c_str(), target.c_str(), fstype, rwflag, data);\n}\n\n/// @throws ExcOs\nvoid *os::dlsym(void *handle, const char *symbol)\n{\n    auto sym_ = ::dlsym(handle, symbol);\n    if(sym_ == nullptr){\n        char* errStr = dlerror();\n        if(errStr == nullptr){\n            throw ExcOs(\"dlsym returned null, but 
dlerror was also null...\");\n        }\n        throw ExcOs(\"dlsym failed: \" + std::string(errStr));\n    }\n    return sym_;\n}\n\n\n/// @throws ExcOs\nvoid os::setns(int fd, int nstype)\n{\n    if(::setns(fd, nstype) == -1 ){\n        throw ExcOs(\"Failed to enter namespace \" + std::to_string(nstype));\n    }\n}\n\n\npid_t os::setsid()\n{\n    pid_t sid = ::setsid();\n    if(sid == static_cast<pid_t>(- 1) ){\n        throw ExcOs(\"setsid failed\");\n    }\n    return sid;\n}\n\n\n\nbool os::exists(const std::string &name)\n{\n      struct stat buffer;\n      return (::stat (name.c_str(), &buffer) == 0);\n}\n\n/// Shortcut for fcntl(F_SETFD, ...\n/// @throws ExcOs\nvoid os::setFdDescriptorFlags(int fd, int flags)\n{\n    if(::fcntl(fd, F_SETFD, flags) == -1){\n        throw ExcOs(std::string(__func__) + \" failed for fd \"+\n                    std::to_string(fd)\n                    + \" (flags \" + std::to_string(flags) + \")\"\n                    );\n    }\n}\n\nvoid os::setFdStatusFlags(int fd, int flags){\n    if(::fcntl(fd, F_SETFL, flags) == -1){\n        throw ExcOs(std::string(__func__) + \" failed for fd \"+\n                    std::to_string(fd)\n                    + \" (flags \" + std::to_string(flags) + \")\"\n                    );\n    }\n}\n\n\n\nint os::dup(int oldfd){\n    int newfd = ::dup(oldfd);\n    if(newfd == -1){\n        throw ExcOs(std::string(__func__) + \" failed for fd \"+\n                    std::to_string(oldfd)\n                    );\n    }\n    return newfd;\n}\n\nvoid os::dup2(int oldfd, int newfd)\n{\n    if(::dup2(oldfd, newfd) == -1){\n        throw ExcOs(std::string(__func__) + \" failed for fds \"+\n                    std::to_string(oldfd) + \", \" + std::to_string(newfd)\n                    );\n    }\n}\n\nvoid os::dup3(int oldfd, int newfd, int flags)\n{\n    if(::dup3(oldfd, newfd, flags) == -1){\n        throw ExcOs(std::string(__func__) + \" failed for fds \"+\n                    std::to_string(oldfd) + \", 
\" + std::to_string(newfd)\n                    );\n    }\n}\n\n/// @throws ExcOs\nvoid os::fchdir(int fd)\n{\n    if(::fchdir(fd) == -1){\n        throw ExcOs(\"fchdir failed\");\n    }\n}\n\n\n/// @throws ExcOs\nvoid os::fchmod(int fd, mode_t mode)\n{\n    if(::fchmod(fd, mode) == -1){\n        throw ExcOs(\"fchmod failed\");\n    }\n}\n\nvoid os::sigaction(int signum, const struct sigaction *act,\n                   struct sigaction *oldact)\n{\n    if(::sigaction(signum, act, oldact) == -1){\n        throw ExcOs(std::string(__func__) + \"failed\");\n    }\n}\n\n\n/// @throws ExcOs\n/// @return the signal number\nint os::sigwait(const sigset_t *set)\n{\n    int sig;\n    int ret = ::sigwait(set, &sig);\n    if(ret != 0){\n        // not using errno here!\n        throw ExcOs(\"sigwait failed\", ret);\n    }\n    return sig;\n}\n\n/// @throws ExcOs\nvoid os::sigfillset(sigset_t *set)\n{\n    if(::sigfillset(set) != 0){\n        throw ExcOs(\"sigfillset failed\");\n    }\n}\n\n/// Always returns the old handler\n/// @throws ExcOs\nsighandler_t os::signal(int sig, sighandler_t handler)\n{\n    auto oldhandler = ::signal(sig, handler);\n    if ( oldhandler == SIG_ERR) {\n        throw ExcOs(\"signal failed\");\n    }\n    return oldhandler;\n}\n\nvoid os::symlink(const char *target, const char *linkpath)\n{\n    if(::symlink(target, linkpath) == -1){\n        throw ExcOs(\"symlink failed\");\n    }\n}\n\n\n/// @throws ExcOs\nvoid os::chdir(const char *path)\n{\n    if(::chdir(path) == -1){\n        throw ExcOs(\"chdir failed\");\n    }\n}\n\nvoid os::chdir(const std::string &path)\n{\n    return os::chdir(path.data());\n}\n\n/// shortcut for fcntl(fd , F_GETFL)\n/// @throws ExcOs\nint os::getFdStatusFlags(int fd)\n{\n    int statusflags = fcntl(fd , F_GETFL);\n    if(statusflags == -1){\n        throw ExcOs(\"failed to get status flags from fd \" + std::to_string(fd));\n    }\n    return statusflags;\n}\n\n/// shortcut for fcntl(fd , F_GETFD)\n/// @throws ExcOs\nint 
os::getFdDescriptorFlags(int fd)\n{\n    int statusflags = fcntl(fd , F_GETFD);\n    if(statusflags == -1){\n        throw ExcOs(\"failed to get descriptor flags from fd \" + std::to_string(fd));\n    }\n    return statusflags;\n}\n\n/// @throws ExcOs\nssize_t os::read(int fd, void *buf, size_t nbytes, bool retryOnInterrupt)\n{\n    while (true) {\n        auto read = ::read(fd, buf, nbytes);\n        if(read == -1){\n            if(retryOnInterrupt && errno == EINTR){\n                continue;\n            }\n            throw ExcOs(\"read failed\");\n        }\n        return read;\n    }\n}\n\nvoid os::readlinkat(int dirfd, const char *filename, std::string &output){\n    folly::resizeWithoutInitialization(output, PATH_MAX);\n    const ssize_t path_len = ::readlinkat(dirfd, filename, &output[0], PATH_MAX);\n    if (path_len == -1 ){\n        throw ExcReadLink(\"readlinkat failed for file \" + std::string(filename));\n    }\n    folly::resizeWithoutInitialization(output, static_cast<typename std::string::size_type>(path_len));\n}\n\nvoid os::rmdir(const char *path){\n    if (::rmdir(path) == -1 ){\n        throw ExcReadLink(\"rmdir failed for \" + std::string(path));\n    }\n}\n\n/// @return the number of bytes send\n/// @throws ExcOs\nsize_t os::sendmsg(int fd, const msghdr *message, int flags)\n{\n    while (true){\n        ssize_t ret = ::sendmsg(fd, message, flags);\n        if (ret == -1) {\n            if(retryOnInterrupt() && errno == EINTR){\n                continue;\n            }\n            throw ExcOs(std::string(__func__) + \" failed\");\n        }\n        return static_cast<size_t>(ret);\n    }\n\n\n\n}\n\n/// offset of in_fd is *not* modified\n/// @return number of sent bytes\noff_t os::sendfile(int out_fd, int in_fd, size_t count, off_t offset)\n{\n    ssize_t sizeToSend = count;\n    while (true) {\n        auto sent = ::sendfile(out_fd, in_fd, &offset, sizeToSend);\n        if(sent == -1){\n            throw ExcOs(std::string(__func__) + \" 
failed\");\n        }\n        sizeToSend -= static_cast<size_t>(sent);\n        assert(sizeToSend >= 0);\n        if(sizeToSend <= 0 || sent == 0){\n            break;\n        }\n    }\n    return count - sizeToSend;\n}\n\n/// returns the number of bytes received\n/// @throws ExcOs\nsize_t os::recvmsg(int fd, msghdr *message, int flags)\n{\n    while (true){\n        ssize_t ret = ::recvmsg(fd, message, flags);\n        if (ret == -1) {\n            if(retryOnInterrupt() && errno == EINTR){\n                continue;\n            }\n            throw ExcOs(std::string(__func__) + \" failed\");\n        }\n        return static_cast<size_t>(ret);\n    }\n}\n\nos::SocketPair_t os::socketpair(int domain, int type_, int protocol)\n{\n    SocketPair_t pair;\n    if(::socketpair(domain, type_, protocol, pair.data()) == -1){\n        throw ExcOs(std::string(__func__) + \" failed\");\n    }\n    return pair;\n}\n\n\n\nvoid os::unsetenv(const char *name)\n{\n    if(::unsetenv(name) == -1){\n        throw ExcOs(std::string(__func__) + \" failed\");\n    }\n}\n\n\n/// Return a rather random array of signals which are catchable and would by default\n/// cause a process to end.\nconst std::vector<int>& os::catchableTermSignals()\n{\n    static const std::vector<int> sigs {SIGHUP, SIGINT, SIGQUIT, SIGTERM, SIGPIPE};\n    return sigs;\n}\n\n\nvoid os::flock(int fd, int operation)\n{\n    if(::flock(fd, operation) == -1){\n        throw os::ExcOs(\"flock failed\");\n    }\n\n}\n\n\n\n\n\n\n\n\n\n\n\n"
  },
  {
    "path": "src/common/oscpp/os.h",
    "content": "#pragma once\n\n\n#include <sys/stat.h>\n#include <grp.h>\n\n#include <pwd.h>\n#include <unistd.h>\n#include <linux/limits.h>\n#include <string>\n#include <array>\n#include <vector>\n#include <QVector>\n#include <deque>\n#include <csignal>\n#include <QtGlobal>\n\n#include \"excos.h\"\n#include \"util.h\"\n\n\n/// Private functions - internal use\nnamespace __os {\n\nint openat(int dirfd, const char* filename, int flags, bool clo_exec, mode_t mode);\n\n}\n\n\n\n/// Simple wrappers for several os calls\n/// which throw exceptions on error.\nnamespace os {\n\nvoid setRetryOnInterrupt(bool val);\n\n// read/write by owner, read only by group and others\nstatic const int DEFAULT_CREAT_FLAGS = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;\n\nextern const int OPEN_WRONLY;\nextern const int OPEN_RDONLY;\nextern const int OPEN_RDWR;\nextern const int OPEN_CLOEXEC;\nextern const int OPEN_NONBLOCK;\nextern const int OPEN_EXCL;\nextern const int OPEN_CREAT;\nextern const int OPEN_TRUNC;\n\n\nclass ExcKernelVersionParse : public std::exception\n{\n};\n\ntypedef std::array<int, 2> Pipes_t;\ntypedef std::array<int, 2> SocketPair_t;\ntypedef std::vector<gid_t> Groups;\ntypedef std::array<int, 3> KernelVersion; // major minor patch\ntypedef struct stat stat_t;\n\n\nenum DirFilter { NoDot=0x2000, NoDotDot=0x4000, NoDotAndDotDot=NoDot | NoDotDot };\n\nconst std::vector<int> &catchableTermSignals();\n\nvoid chdir(const char *path);\nvoid chdir(const std::string& path);\ntemplate <class Str_t>\nvoid chmod(const Str_t& path, mode_t mode);\n\nvoid close(int fd);\n\nvoid *dlsym (void *handle, const char *symbol);\n\npid_t fork();\n\nstat_t fstat(int fd);\nstat_t stat(const char* filename);\n\nint dup(int oldfd);\nvoid dup2(int oldfd, int newfd);\nvoid dup3(int oldfd, int newfd, int flags);\n\nbool exists(const std::string& name);\n\nvoid fchdir(int fd);\nvoid fchmod(int fd, mode_t mode);\n\nint getFdStatusFlags(int fd);\nint getFdDescriptorFlags(int fd);\nstd::string 
getHomeDir();\nstd::string getCacheDir();\n\nvoid getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid);\nvoid getresuid(uid_t *ruid, uid_t *euid, uid_t *suid);\n\nuid_t getuid();\nuid_t geteuid();\nuid_t getsuid();\n\ngid_t getgid();\ngid_t getegid();\ngid_t getsgid();\n\nGroups getgroups();\n\npid_t getpid();\ntemplate <class Str_t>\nStr_t getUserName();\n\n\n[[ noreturn ]] void exec(const char *filename, char *const argv[],\n                         char *const envp[]=nullptr);\n[[ noreturn ]] void exec(const std::vector<std::string> &args,\n                         char *const envp[]=nullptr);\ntemplate <class ContainerT>\n[[ noreturn ]] void exec(const ContainerT &args,\n                         char *const envp[]=nullptr);\n\nvoid flock(int fd, int operation);\n\nstd::vector<std::string> ls(const std::string & dirname_,\n                            DirFilter filter=DirFilter::NoDotAndDotDot);\n\n\noff_t lseek (int fd, off_t offset, int whence);\noff_t ltell(int fd);\n\nvoid mkfifo(const char *pathname, mode_t mode);\nvoid mkpath(std::string s, mode_t mode=0755);\nvoid mount (const std::string & source, const std::string &target,\n          const char *fstype, unsigned long int rwflag,\n          const void *data=nullptr);\nvoid mount (const char* source, const char* target,\n            const char *fstype, unsigned long int rwflag,\n            const void *data=nullptr);\n\n\nint open(const char*  filename, int flags, bool clo_exec=true,\n         mode_t mode=DEFAULT_CREAT_FLAGS);\ntemplate <class Str_t>\nint open(const Str_t& filename, int flags, bool clo_exec=true,\n         mode_t mode=DEFAULT_CREAT_FLAGS);\n\n\n// int openat(int dirfd, const char *filename, int flags, bool clo_exec=true,\n//            mode_t mode=DEFAULT_CREAT_FLAGS);\ntemplate <class Str_t>\nint openat(int dirfd, const Str_t& filename, int flags,bool clo_exec=true,\n           mode_t mode=DEFAULT_CREAT_FLAGS );\n\nint unshare (int flags);\n\nPipes_t pipe(int flags=0, bool 
clo_exec=true);\n\ntemplate <class Str_t>\nStr_t readlink (const char* filename);\ntemplate <class Str_t>\nStr_t readlink (const Str_t & filename);\n\ntemplate <class Str_t>\nStr_t readlinkat (int dirfd, const Str_t & filename);\n\nvoid readlinkat (int dirfd, const char* filename, std::string & output);\n\ntemplate <class Str_t>\nvoid readlinkat (int dirfd, const Str_t & filename, Str_t & output);\n\nssize_t read (int fd, void *buf, size_t nbytes, bool retryOnInterrupt=false);\n\ntemplate <class Str_t>\nStr_t readStr(int fd, size_t nbytes, bool retryOnInterrupt=false);\n\nsize_t recvmsg (int fd, struct msghdr *message, int flags=0);\ntemplate <class Str_t>\nvoid remove(const Str_t & path);\ntemplate <class Str_t>\nvoid rename(const Str_t & old, const Str_t & new_);\nvoid rmdir(const char *path);\n\nsize_t sendmsg (int fd, const struct msghdr *message,\n            int flags=0);\n\noff_t sendfile(int out_fd, int in_fd, size_t count, off_t offset=0);\ntemplate <class Str_t>\noff_t sendfile(const Str_t& out_path, const Str_t& in_path, size_t count);\n\nvoid setFdDescriptorFlags(int fd, int flags);\nvoid setFdStatusFlags(int fd, int flags);\n\nvoid setgid (gid_t gid);\nvoid setgroups (const Groups & groups);\nvoid setegid(gid_t gid);\n\nvoid setpriority(int which, id_t who, int prio);\n\ntemplate <class Str_t>\nvoid setenv(const Str_t& name, const Str_t& value,\n            bool overwrite=true);\n\nvoid seteuid(uid_t uid);\nvoid setuid (uid_t uid);\n\nvoid setns (int fd, int nstype);\n\npid_t setsid();\n\nvoid sigaction(int signum, const struct sigaction *act,\n                     struct sigaction *oldact);\nint sigwait(const sigset_t *set);\nvoid sigfillset(sigset_t *set);\n\nsighandler_t signal(int sig, sighandler_t handler);\n\nvoid symlink(const char *target, const char *linkpath);\n\nSocketPair_t socketpair (int domain, int type_, int protocol=0);\n\nvoid unlinkat(int dirfd, const char *pathname, int flags);\n\nvoid umount (const std::string& specialFile);\nvoid 
unsetenv(const char* name);\n\npid_t waitpid (pid_t pid, int* status=nullptr, int options=0, bool cleanStatusOnSuccess=true);\n\nssize_t write (int fd, const void *buf, size_t n, bool throwIfLessBytesWritten=true);\n\nssize_t write (int fd, const std::string &buf, bool throwIfLessBytesWritten=true);\nssize_t write (int fd, const QByteArray &buf, bool throwIfLessBytesWritten=true);\n\n\nstd::vector<gid_t> queryGroupIds();\n\nKernelVersion getKernelVersion();\n\n} // namespace os\n\ntemplate <class Str_t>\nvoid os::chmod(const Str_t& path, mode_t mode){\n    if(::chmod(strDataAccess(path), mode) == -1){\n        throw ExcOs(\"chmod failed\");\n    }\n}\n\n\ntemplate <class ContainerT>\nvoid os::exec(const ContainerT &args, char * const envp[])\n{\n    if (args.isEmpty()) {\n        throw std::invalid_argument(\n                    \"exec called with empty args\");\n    }\n    os::exec(args[0], (char**)args.data(), envp);\n}\n\n\ntemplate <class Str_t>\nStr_t os::readlinkat (int dirfd, const Str_t & filename){\n    Str_t path;\n    os::readlinkat(dirfd, filename, path);\n    return path;\n}\n\n\n\n\ntemplate <class Str_t>\nvoid os::readlinkat (int dirfd, const Str_t & filename, Str_t & output){\n    output.resize(PATH_MAX);\n    char* buf = strDataAccess(output);\n    const char* filename_cstr = strDataAccess(filename);\n    ssize_t path_len = ::readlinkat(dirfd, filename_cstr, buf, PATH_MAX);\n    if (path_len == -1 ){\n        throw ExcReadLink(\"readlinkat failed for file \" + std::string(filename_cstr));\n    }\n    output.resize(static_cast<typename Str_t::size_type>(path_len));\n}\n\n\n/// @throws ExcReadLink\ntemplate <class Str_t>\nStr_t os::readlink(const char* filename)\n{\n    Str_t path;\n    path.resize(PATH_MAX);\n    char* buf = strDataAccess(path);\n\n    ssize_t path_len = ::readlink(filename, buf, PATH_MAX);\n    if(path_len == -1){\n        throw ExcReadLink(\"readlink failed for file \" + std::string(filename));\n    }\n#if QT_VERSION >= 
QT_VERSION_CHECK(5, 7, 0)\n    path.resize(static_cast<typename Str_t::size_type>(path_len));\n#else\n    // accept the Warning, at least in QT <=5.3 QByteArray has no size_type\n    path.resize(path_len);\n#endif\n    return path;\n}\n\n\n/// @throws ExcReadLink\ntemplate <class Str_t>\nStr_t os::readlink(const Str_t &filename)\n{\n    return os::readlink<Str_t>(filename.data());\n}\n\n\n/// @throws ExcOs\ntemplate <class Str_t>\nint os::open(const Str_t& filename, int flags, bool clo_exec, mode_t mode){\n    return os::open(strDataAccess(filename), flags, clo_exec, mode);\n}\n\n\n/// @throws ExcOs\ntemplate <class Str_t>\nint os::openat(int dirfd, const Str_t& filename, int flags, bool clo_exec, mode_t mode){    \n    return __os::openat(dirfd, strDataAccess(filename), flags, clo_exec, mode);\n}\n\n/// @return the username of the *real* user\ntemplate <class Str_t>\nStr_t os::getUserName()\n{\n    return ::getpwuid(getuid())->pw_name;\n}\n\n\n/// @throws ExcOs\ntemplate <class Str_t>\nStr_t os::readStr(int fd, size_t nbytes, bool retryOnInterrupt)\n{\n    Str_t buf;\n    buf.resize(nbytes);\n    ssize_t readBytes = os::read(fd, buf.data(), buf.size(), retryOnInterrupt);\n    buf.resize(static_cast<typename Str_t::size_type>(readBytes));\n    return buf;\n}\n\n/// @throws ExcOs\ntemplate <class Str_t>\nvoid os::remove(const Str_t & path){\n    if(::remove(strDataAccess(path)) == -1){\n        throw ExcOs(\"remove failed\");\n    }\n}\n\n/// @throws ExcOs\ntemplate <class Str_t>\nvoid os::rename(const Str_t & old, const Str_t & new_){\n    if(::rename(strDataAccess(old), strDataAccess(new_)) == -1){\n        throw ExcOs(\"rename failed\");\n    }\n}\n\n/// @throws ExcOs\ntemplate <class Str_t>\nvoid os::setenv(const Str_t &name, const Str_t &value, bool overwrite)\n{\n    // in contrast to putenv, setenv makes copies of the passed\n    // string, so we are safe, when value-string goes out of scope\n    if(::setenv(strDataAccess(name), strDataAccess(value), 
int(overwrite)) == -1){\n         throw ExcOs(\"setenv failed\");\n    }\n}\n\n\ntemplate <class Str_t>\noff_t os::sendfile(const Str_t& out_path, const Str_t& in_path, size_t count){\n    int out_fd=-1, in_fd=-1;\n    try {\n        out_fd = os::open<Str_t>(out_path, os::OPEN_WRONLY | os::OPEN_CREAT);\n        in_fd = os::open<Str_t>(in_path, os::OPEN_RDONLY);\n        auto ret = os::sendfile(out_fd, in_fd, count);\n        close(out_fd);\n        close(in_fd);\n        return ret;\n    } catch (const os::ExcOs&) {\n        if(out_fd != -1) close(out_fd);\n        if(in_fd != -1)  close(in_fd);\n        throw ;\n    }\n}\n\n\n\n\n"
  },
  {
    "path": "src/common/oscpp/oscaps.cpp",
    "content": "#include <sys/prctl.h>\n#include <sys/capability.h>\n#include <iostream>\n\n#include \"oscaps.h\"\n#include \"excos.h\"\n\n\n\nstatic void cap_set_flag_wrapper(cap_t caps,\n                          cap_flag_t typeOfFlag,\n                          const os::Capabilites::CapFlags &flags,\n                          cap_flag_value_t setOrClear){\n    if (cap_set_flag(caps, typeOfFlag,\n                     int(flags.size()), flags.data(), setOrClear) == -1) {\n        throw os::ExcOs(__func__);\n    }\n}\n\n\nos::Capabilites::Capabilites(cap_t caps)\n    : m_caps(caps)\n{}\n\nvoid os::Capabilites::flush()\n{\n    // in future maybe something else but only flush to proc, which\n    // should be set in constructor (enum)\n    this->flushToProc();\n}\n\nos::Capabilites::~Capabilites()\n{\n    if(m_caps != nullptr){\n        if(::cap_free(m_caps) == -1){\n            perror(__func__);\n        }\n    }\n}\n\nvoid os::Capabilites::setFlags(cap_flag_t typeOfFlag, const CapFlags &flags,\n                               bool autoflush)\n{\n    cap_set_flag_wrapper(m_caps, typeOfFlag, flags, CAP_SET);\n    if(autoflush){\n        this->flush();\n    }\n}\n\nvoid os::Capabilites::clearFlags(cap_flag_t typeOfFlag, const CapFlags &flags,\n                                 bool autoflush)\n{\n    cap_set_flag_wrapper(m_caps, typeOfFlag, flags, CAP_CLEAR);\n    if(autoflush){\n        this->flush();\n    }\n}\n\nvoid os::Capabilites::clear(bool autoflush)\n{\n    if( cap_clear(m_caps) == -1){\n        throw os::ExcOs(__func__);\n    }\n    if(autoflush){\n        this->flush();\n    }\n}\n\n/// see also: cap_set_proc\nvoid os::Capabilites::flushToProc()\n{\n    if (::cap_set_proc(m_caps) == -1){\n        throw os::ExcOs(__func__);\n    }\n}\n\n\n/// see also cap_get_proc\nos::Capabilites::Ptr_t os::Capabilites::fromProc()\n{\n    cap_t caps = ::cap_get_proc();\n    if(caps == nullptr){\n        throw os::ExcOs(__func__);\n    }\n    return Ptr_t(new 
os::Capabilites(caps));\n}\n"
  },
  {
    "path": "src/common/oscpp/oscaps.h",
    "content": "#pragma once\n\n#include <sys/capability.h>\n#include <memory>\n#include <vector>\n\n#include \"util.h\"\n\nnamespace os {\n\n\n/// Simple wrapper around libcap\n/// When leaving autoflush to the default value of true, the set flags are applied\n/// immediately to the process (throws ExsOs on error).\nclass Capabilites{\npublic:\n    typedef std::shared_ptr<Capabilites> Ptr_t;\n    typedef std::vector<cap_value_t> CapFlags;\n\n    ~Capabilites();\n\n    void setFlags(cap_flag_t typeOfFlag ,const CapFlags& flags, bool autoflush=true);\n    void clearFlags(cap_flag_t typeOfFlag ,const CapFlags& flags, bool autoflush=true);\n\n    void clear(bool autoflush=true);\n\n    void flushToProc();\n\n    static Ptr_t fromProc();\n\npublic:\n    Q_DISABLE_COPY(Capabilites)\n    DEFAULT_MOVE(Capabilites)\n\nprivate:\n    Capabilites(cap_t caps);\n    void flush();\n\n    cap_t m_caps;\n};\n\n}\n"
  },
  {
    "path": "src/common/oscpp/osutil.cpp",
    "content": "\n\n\n#include <cassert>\n#include <cstring>\n#include <cstdio>\n#include <fcntl.h>\n#include <unistd.h>\n#include <cerrno>\n#include <sys/resource.h>\n#include <sys/time.h>\n#include <string>\n#include <QDir>\n#include <QFileInfoList>\n#include <poll.h>\n#include <sys/ioctl.h>\n#include <fstream>\n#include <string>\n#include <sstream>\n#include <fcntl.h>\n#include <ext/stdio_filebuf.h>\n#include <thread>\n#include <random>\n#include <chrono>\n\n#include \"osutil.h\"\n#include \"os.h\"\n#include \"pidcontrol.h\"\n#include \"qoutstream.h\"\n#include \"fdentries.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nvoid intertSighandler(int){}\n\n#ifdef __cplusplus\n}\n#endif\n\nint osutil::countOpenFds() {\n     int count = 0;\n     for(const int fd : osutil::FdEntries()){\n         Q_UNUSED(fd)\n         count++;\n     }\n     return count;\n}\n\n\nrlim_t osutil::getMaxCountOpenFiles()\n{\n    struct rlimit rlim;\n    getrlimit(RLIMIT_NOFILE, &rlim);\n    return rlim.rlim_cur;\n}\n\n\n/// @return true, if fd existed within this process\nbool osutil::fdIsOpen(int fd)\n{\n    const std::string fdpath = \"/proc/self/fd/\" + std::to_string(fd);\n    return os::exists(fdpath);\n}\n\n\n/// @return true, if st1 and st2 refer to the same file (device/inode)\nbool osutil::sameFile(const os::stat_t& st1, const os::stat_t& st2)\n{\n    return st1.st_dev == st2.st_dev &&\n            st1.st_ino == st2.st_ino;\n}\n\n\n/// Get the file access mode, file status flags and *some* 'file creation flags' using\n/// /proc/$pid/fdinfo/$fd.\n/// The returned flags include O_CLOEXEC.\n/// @param fdInfoDir: an open directory descritor pointing to an fdinfo-dir.\n/// @param fdNb: the file descritor\n/// See also: man 5 proc\nint osutil::retrieveFdFlags(int fdInfoDir, const std::string& fdNb)\n{\n    // Note that fcntl(fd, F_GETFL) does *not* return O_CLOEXEC (and possibly others?).\n    // That flag would have to be obtained *indirectly* by\n    // fcntl(fd , F_GETFD), which, 
as of March 2019, only has the FD_CLOEXEC-flag\n    // (which has a different value than O_CLOEXEC).\n    std::string octalFlags = parseGenericKeyValFile(fdInfoDir, fdNb, \"flags:\");\n    return std::stoi( octalFlags, nullptr, 8 );\n}\n\n\n/// Get the file access mode, file status flags and *some* 'file creation flags' using fcntl.\n/// The returned flags include O_CLOEXEC (if set).\nint osutil::retrieveFdFlags(int fd)\n{\n    // Note that fcntl(fd, F_GETFL) does *not* return O_CLOEXEC.\n    // That flag can be obtained *indirectly* by\n    // fcntl(fd , F_GETFD), which, as of March 2019, only has the FD_CLOEXEC-flag\n    // (which has a different value than O_CLOEXEC).\n    int statusFlags = os::getFdStatusFlags(fd);\n    int descrFlags = os::getFdDescriptorFlags(fd);\n    if(IsBitSet(descrFlags, FD_CLOEXEC)){\n        setBitIn(statusFlags, O_CLOEXEC);\n    }\n    return statusFlags;\n}\n\n\n/// Reopen an open file decriptor of *this* process by resolving the symlink\n/// /proc/self/fd/$fd points to and passing that path (string) to open(2).\n/// Make sure that the new file descriptor really refers to the\n/// *same* file (it might not be the same path though but instead\n/// another hardlink, which is ignored here).\n/// @return the new file descriptor\n/// @throws ExcOs, especially, if the reopened file has a different device-\n/// inode-combination.\nint osutil::reopenFdByPath(int oldFd, int openflags, bool clo_exec,\n                   bool restoreOffset) {\n    const auto oldStat = os::fstat(oldFd);\n\n    // Race condition in next line...\n    const int newFd = os::open(osutil::findPathOfFd<QByteArray>(oldFd), openflags, clo_exec);\n    // Note: the following is *not* a race-free variant of above call, because after unsharing\n    // the mount-namespace, such an open call results to an fd still belonging to the original\n    // mnt_id.\n    // const int newFd = os::open(\"/proc/self/fd/\" + std::to_string(oldFd), openflags, clo_exec);\n\n    auto closeOnErr 
= finally([&newFd] { close(newFd); });\n    const auto newStat = os::fstat(newFd);\n    if(! osutil::sameFile(oldStat, newStat)){\n        throw os::ExcOs(\"reopen failed, the new path refers to a \"\n                        \"different file\", 0);\n    }\n    if(restoreOffset){\n        os::lseek(newFd, os::ltell(oldFd), SEEK_SET);\n    }\n    closeOnErr.setEnabled(false);\n    return newFd;\n}\n\n\n\nstd::string osutil::parseGenericKeyValFile(int dirFd,\n                                           const std::string &filename,\n                                           const std::string &key)\n{\n    int fd = os::openat(dirFd, filename.c_str(), O_RDONLY);\n    // closes fd in destrcutor\n    __gnu_cxx::stdio_filebuf<char> filebuf(fd, std::ios::in);\n    std::istream is(&filebuf);\n\n    std::string line;\n    while(getline(is, line)){\n        std::string currentKey;\n        std::stringstream wordStream(line);\n        if( !(wordStream >> currentKey)){\n             continue;\n        }\n        if(currentKey != key){\n            continue;\n        }\n        std::string val;\n        if( wordStream >> val){\n            return val;\n        } \n        break;\n        \n    }\n    return std::string();\n\n}\n\n\n\nstd::string osutil::fcntlflagsToString(int flags)\n{\n    std::string o;\n    if (flags & O_WRONLY){\n        o += \"O_WRONLY \";\n    } else if (flags & O_RDWR){\n        o += \"O_RDWR \";\n    } else {\n        o += \"O_RDONLY \";\n    }\n\n    if (flags & O_CREAT)\n        o += \"O_CREAT \";\n    if (flags & O_CLOEXEC)\n        o += \"O_CLOEXEC \";\n    if (flags & O_DIRECTORY)\n        o += \"O_DIRECTORY \";\n    if (flags & O_EXCL)\n        o += \"O_EXCL \";\n    if (flags & O_NOCTTY)\n        o += \"O_NOCTTY \";\n    if (flags & O_NOFOLLOW)\n        o += \"O_NOFOLLOW \";\n#ifdef O_TMPFILE\n    if (flags & O_TMPFILE)\n        o += \"O_TMPFILE \";\n#endif\n\n    if (flags & O_APPEND)\n        o += \"O_APPEND \";\n    if (flags & O_ASYNC)\n        
o += \"O_ASYNC \";\n    if (flags & O_DIRECT)\n        o += \"O_DIRECT \";\n    if (flags & O_DSYNC)\n        o += \"O_DSYNC \";\n    if (flags & O_LARGEFILE)\n        o += \"O_LARGEFILE \";\n    if (flags & O_NOATIME)\n        o += \"O_NOATIME \";\n    if (flags & O_NONBLOCK)\n        o += \"O_NONBLOCK \";\n    if (flags & O_PATH)\n        o += \"O_PATH \";\n    if (flags & O_DSYNC)\n        o += \"O_DSYNC \";\n    if (flags & O_SYNC)\n        o += \"O_SYNC \";\n    if (flags & O_TRUNC)\n        o += \"O_TRUNC \";\n\n    return o;\n}\n\n\n/// Merely a debug function\nvoid osutil::printOpenFds(bool onlyRegular)\n{\n    QIErr() << \"open fds:\\n\";\n    for(const int fd : osutil::FdEntries()){\n        auto st = os::fstat(fd);\n        if( onlyRegular && ! S_ISREG(st.st_mode)){\n            continue;\n        }\n        QByteArray fdPath = QByteArray(\"/proc/self/fd/\") + QByteArray::number(fd);\n        auto resolvedPath = os::readlink<QByteArray>(fdPath);\n        QIErr() << fd << \": \" << resolvedPath;\n    }\n}\n\nvoid osutil::randomSleep(int msMin, int msMax)\n{\n    std::mt19937_64 eng{std::random_device{}()};\n    std::uniform_int_distribution<> dist{msMin, msMax};\n    std::this_thread::sleep_for(std::chrono::milliseconds{dist(eng)});\n}\n\n/// For most efficient usage assign a bufsize a little larger than the (probable)\n/// file size.\nQByteArray osutil::readWholeFile(int fd, int bufSize)\n{\n    assert(bufSize > 0);\n    QByteArray buf;\n    buf.resize(bufSize);\n    int offset=0;\n    while(true){\n        char* dataPtr = buf.data() + offset;\n        auto readCount = os::read(fd, dataPtr, static_cast<size_t>(bufSize), true);\n        if(readCount < bufSize){\n            // EOF\n            buf.resize(offset + static_cast<int>(readCount));\n            return buf;\n        }\n        offset += bufSize;\n        buf.resize(buf.size() + bufSize);\n    }\n\n}\n\n/// @param fd: typically STDOUT_FILENO or similar\nbool osutil::isTTYForegoundProcess(int 
fd)\n{\n    return getpgrp() == tcgetpgrp(fd);\n}\n\n\n/// Wait until a typical 'TERM'-signal occurs. During wait the signal handlers\n/// are overridden and restored afterwards.\nvoid osutil::waitForSignals()\n{\n    QVarLengthArray<sighandler_t, 64> oldHandlers;\n\n    // wait for typical signals to exit\n    for(int s : os::catchableTermSignals()){\n        oldHandlers.push_back(os::signal(s, [](int){}));\n    }\n    sigset_t sigs;\n    os::sigfillset(&sigs);\n    os::sigwait(&sigs);\n    for (int i=0; i < oldHandlers.size(); ++i) {\n        os::signal(os::catchableTermSignals()[static_cast<size_t>(i)], oldHandlers[i]);\n    }\n\n}\n\n/// Set a signal handler doing nothing for the specified signales.\n/// Note that this is *not* equivalent to SIG_IGN:\n/// SIG_IGN is inherited on execve, our signal handler is not.\nvoid osutil::setInertSighandler(const std::vector<int> &sigs)\n{\n    struct sigaction act{};\n    act.sa_handler = intertSighandler;\n    sigemptyset (&act.sa_mask);\n    act.sa_flags = SA_RESTART;\n    for(auto s : sigs){\n        os::sigaction(s, &act, nullptr);\n    }\n}\n\n\n/// be verbose in case os::close fails\nvoid osutil::closeVerbose(int fd)\n{\n    try {\n        os::close(fd);\n    } catch (const os::ExcOs& e) {\n        std::cerr << e.what()\n                  << generate_trace_string()\n                  << \"\\n\";\n    }\n}\n\n/// Filedescriptors are usually given out using low integers, this function allows\n/// for finding the highest fd starting at startFd. If startFd==-1, return the\n/// the highest possible free fd (per-process max.-fd-count is e.g. 1024).\n/// @return: A fd-number if a free fd could be found. The fd is not opened, so there is\n/// a race-condition here. If no fd in the given range could be found, return -1.\nint osutil::findHighestFreeFd(int startFd, int minFd){\n    int fd = (startFd == -1) ? 
static_cast<int>(osutil::getMaxCountOpenFiles() -1)\n                             : startFd;\n    for(; fd >= minFd; --fd) {\n        if(fd == 255 ){ // that one is usually reserved\n            continue;\n        }\n        if(! osutil::fdIsOpen(fd)){\n            return fd;\n        }\n    }\n    return -1;\n}\n\n/// @param path: if empty, it will be filled with tmp.xxx at system's tempdir\nint osutil::mktmp(QByteArray &path, int flags){\n    if(path.isEmpty()){\n        path = QDir::tempPath().toUtf8();\n        path = pathJoinFilename(path, QByteArray(\"tmp.XXXXXX\"));\n    }\n    int fd = ::mkostemp(path.data(), flags);\n    if (fd < 0) {\n        throw os::ExcOs(\"osutil::mktmp failed\");\n    }\n    return fd;\n}\n\nint osutil::mktmp(int flags){\n    QByteArray p(QDir::tempPath().toUtf8());\n    p = pathJoinFilename(p, QByteArray(\"tmp.XXXXXX\"));\n    return osutil::mktmp(p, flags);\n}\n\nint osutil::unnamed_tmp(int flags){\n    // tmpfs and possibly other filesystems do not suppot O_TMPFILE, for\n    // the sake of simplicity just use mkostemp.\n#if false\n    //#ifdef O_TMPFILE\n    int fd = os::open(p, O_RDWR | O_TMPFILE | O_EXCL | o_flags, true, S_IRUSR | S_IWUSR);\n#else\n    QByteArray p;\n    int fd = osutil::mktmp(p, flags);\n    if(remove(p) < 0){\n        fprintf(stderr, \"%s: failed to delete the just created file %s - %s\\n\",\n                __func__, p.constData(), strerror(errno));\n    }\n#endif\n    return fd;\n}\n\n"
  },
  {
    "path": "src/common/oscpp/osutil.h",
    "content": "#pragma once\n\n#include <QDebug>\n#include <sys/resource.h>\n#include <string>\n#include <vector>\n#include <fcntl.h>\n#include <sys/types.h>\n#include <unistd.h>\n\n#include \"os.h\"\n#include \"cleanupresource.h\"\n#include \"util.h\"\n\nnamespace osutil {\n\nint countOpenFds();\nvoid closeVerbose(int fd);\n\nstd::string fcntlflagsToString(int flags);\nbool fdIsOpen(int fd);\n\n/// Shells usually start at low numbers for internal file descriptors (usually 10),\n/// we try to find the highest possible free fd\n/// If startFd != -1, start searching from that.\nint findHighestFreeFd(int startFd=-1, int minFd=11);\n\n\ntemplate <class Str_t>\nStr_t findPathOfFd(int fd);\n\nrlim_t getMaxCountOpenFiles();\n\nbool isTTYForegoundProcess(int fd);\n\n\nstd::string parseGenericKeyValFile(int dirFd,\n                                   const std::string &filename,\n                                   const std::string &key);\nvoid printOpenFds(bool onlyRegular=false);\n\nvoid randomSleep(int msMin, int msMax);\n\nQByteArray readWholeFile(int fd, int bufSize);\nint retrieveFdFlags(int fd);\nint retrieveFdFlags(int fdInfoDir, const std::string &fdNb);\nint reopenFdByPath(int oldFd, int openflags, bool clo_exec=true,\n                   bool restoreOffset=true);\n\nbool sameFile(const os::stat_t& st1, const os::stat_t& st2);\n\nint mktmp(QByteArray& path, int flags=os::OPEN_CLOEXEC);\nint mktmp(int flags=os::OPEN_CLOEXEC);\nint unnamed_tmp(int flags=os::OPEN_CLOEXEC);\n\nvoid waitForSignals();\n\nvoid setInertSighandler(const std::vector<int>& sigs);\n\n\n} // namespace fdcontrol\n\n\n\n\n\n/// Find path where an open fd of OUR process (currently) points to.\n/// @return path\n/// @throws ExcReadLink\ntemplate <class Str_t>\nStr_t osutil::findPathOfFd(int fd){\n    char procfdPath[PATH_MAX];\n    snprintf(procfdPath, sizeof(procfdPath), \"/proc/self/fd/%d\", fd);\n    Str_t path = os::readlink<Str_t>(procfdPath);\n    return path;\n}\n\n\n\n\n"
  },
  {
    "path": "src/common/pathtree.cpp",
    "content": "#include <cassert>\n#include <iostream>\n#include \"pathtree.h\"\n#include \"util.h\"\n#include \"logger.h\"\n\n/// Write the next available filename from the null-terminated path to out.\n/// Warning: must not contain leading '/'.\n/// @return the current ptr, if out was written, else nullptr (end of string)\nstatic const char* nextFilename(const char* path, StrLight& out){\n    assert(*path != '/');\n    const char* begin = path;\n    for(; *path != '\\0'; ++path){\n        if(*path == '/'){\n            // we are done:\n            out.setRawData(begin, path - begin);\n            // slash '/' not of interest: go on.\n            ++path;\n            return path;\n        }\n    }\n    if(begin == path){\n        // may happen if '/' is exanimed\n        return nullptr;\n    }\n    out.setRawData(begin, path - begin);\n    assert(*path == '\\0');\n    return path;\n}\n\n\n/// Construct a new dir iterator pointing at a directory entries at path.\nPathTree::iterator::iterator(_DirMap::const_iterator begin, _DirMap::const_iterator end,\n                             const StrLight& path) :\n    d(std::make_shared<PrivateData>())\n{\n    if(begin == end){\n        return;\n    }\n\n    d->currentPath = path.deepCopy();\n    appendPath(begin.key());\n    d->dirStack.push_back( { begin, end, begin.key().size() });\n\n    if(! begin.value()->isEnd){\n        // move to first *really* inserted path\n        ++(*this);\n    }\n}\n\nPathTree::iterator::iterator()\n= default;\n\n\n\nbool PathTree::iterator::operator==(const PathTree::iterator &rhs) const\n{\n    bool ourDirsEmpty = (d == nullptr) ? true : d->dirStack.empty();\n    bool otherDirsEmpty = (rhs.d == nullptr) ? 
true : rhs.d->dirStack.empty();\n\n    if(ourDirsEmpty || otherDirsEmpty){\n        // cannot compare dirStack\n        return ourDirsEmpty == otherDirsEmpty;\n    }\n    return d->currentPath == rhs.d->currentPath;\n}\n\nbool PathTree::iterator::operator!=(const PathTree::iterator &rhs) const\n{\n    return ! (*this == rhs);\n}\n\n/// Iterate exactly over the inserted directories skipping\n/// possible other paths, e.g. if /home/user/foo is set,\n/// /home and /home/user will be skipped.\nPathTree::iterator &PathTree::iterator::operator++()\n{\n    assert(! d->dirStack.empty());\n    while (true) {\n        nextDir();\n        if(d->dirStack.empty()){\n            return *this;\n        }\n        auto & currentDir = d->dirStack.back();\n        if(currentDir.it != currentDir.end &&\n                currentDir.it.value()->isEnd){\n            return *this;\n        }\n    }\n}\n\n\n/// go to next dir, prefering going as deep as possible\n/// first, then to the sibling directories and finally\n/// walk up the tree again, jumping over already visited dirs.\nvoid PathTree::iterator::nextDir()\n{\n    assert(! d->dirStack.empty());\n    auto & dirInfo = d->dirStack.back();\n    // go into depth first, if possible\n    auto & subDirs = dirInfo.it.value()->children;\n\n    if(cdSubDirIfExist(subDirs.begin(), subDirs.end())){\n        return;\n    }\n    if(nextSiblingIfExist(dirInfo)){\n        return;\n    }\n    nextEntryInParentDirs();\n}\n\n\nbool PathTree::iterator::cdSubDirIfExist(_DirMap::iterator begin, _DirMap::iterator end)\n{\n    if(begin == end){\n        return false;\n    }\n    appendPath(begin.key());\n    d->dirStack.push_back( { begin, end, begin.key().size() });\n    return true;\n}\n\n/// Go up as many parent directories necessary until the next valid\n/// entry is found. 
The dirstack will be empty, in case we're done\nvoid PathTree::iterator::nextEntryInParentDirs()\n{\n    while (true) {\n        auto & currentDirInfo = d->dirStack.back();\n        stripPath(currentDirInfo.sizeDirName);\n        d->dirStack.pop_back();\n        if(d->dirStack.empty()){\n            // we are done -> empty stack == iterator.end()\n            return;\n        }\n        auto & upperDir = d->dirStack.back();\n        if(nextSiblingIfExist(upperDir)){\n            return;\n        }\n        // got this dir as well -> go up even more\n    }\n}\n\n/// increment the passed dir and adjust the path appropriately if we could switch\n/// to the next sibling (same directory level, no parent- or subdir)\nbool PathTree::iterator::nextSiblingIfExist(PathTree::iterator::CurrentDirInfo &dirInfo)\n{\n    ++dirInfo.it;\n    if(dirInfo.it == dirInfo.end){\n        return false;\n    }\n    stripPath(dirInfo.sizeDirName);\n    appendPath(dirInfo.it.key());\n    dirInfo.sizeDirName = dirInfo.it.key().size();\n    return true;\n}\n\nvoid PathTree::iterator::appendPath(const StrLight &dirname)\n{\n    if( d->currentPath.empty() || d->currentPath.back() != '/'){\n        d->currentPath += '/';\n    }\n    d->currentPath += dirname;\n}\n\nvoid PathTree::iterator::stripPath(size_t lastDirSize)\n{\n    assert(lastDirSize <= d->currentPath.size());\n    d->currentPath.resize(d->currentPath.size() - lastDirSize);\n    if(! d->currentPath.empty() && d->currentPath.back() == '/'){\n        d->currentPath.pop_back();\n    }\n}\n\n\nconst PathTree::iterator PathTree::begin() const\n{\n    return iterator(m_rootDirMapDummy.begin(), m_rootDirMapDummy.end(), \"/\");\n}\n\n\nconst PathTree::iterator PathTree::end() const\n{\n    return iterator();\n}\n\n\n/// @return an iterator pointing on the directory-node corresponding to path.\n/// Subsequentially incrementing it results in an iteration of all sub-paths\n/// as well as path, if it exists. 
Note: path may also be an intermediate\n/// directory (dir->isEnd == false)\nPathTree::iterator PathTree::iter(const StrLight &path) const\n{\n    auto dir =findDir(path);\n    if(dir == nullptr  ){\n        return end();\n    }\n    _DirMap::const_iterator itOfChildInParent;\n    _DirMap::const_iterator dummyEnd;\n    if(dir == m_rootDir){\n        itOfChildInParent = m_rootDirMapDummy.begin();\n        dummyEnd = m_rootDirMapDummy.end();\n    } else {\n        // need to determine the iterators from parent.\n        auto parentDirWeak = dir->parent;\n        assert(! is_uninitialized(parentDirWeak));\n        auto parentDir = parentDirWeak.lock();\n\n        itOfChildInParent = parentDir->children.find(dir->name);\n        assert(itOfChildInParent != parentDir->children.end());\n        // Do not iterate over possible siblings as well: set next one as end()\n        // (even if it is no real end)\n        dummyEnd = itOfChildInParent;\n        ++dummyEnd;\n    }\n    return iterator(itOfChildInParent, dummyEnd, splitAbsPath(path).first);\n}\n\n\n\n /// @return: An iterator for all subpaths of param path (so path is *not*\n /// traversed).\n PathTree::iterator PathTree::subpathIter(const StrLight &path) const\n {\n     auto dir =findDir(path);\n     if(dir == nullptr  ){\n         return end();\n     }\n     return iterator(dir->children.begin(), dir->children.end(), path);\n }\n\n PathTree::iterator PathTree::erase(PathTree::iterator it)\n {\n     assert(! it.d->dirStack.empty());\n     assert( it.d->dirStack.back().it != it.d->dirStack.back().end);\n     auto dir = it.d->dirStack.back().it.value();\n     assert(dir->isEnd);\n     dir->isEnd = false;\n\n     // before possibly deleting empty in-between paths (isEnd=false)\n     // move to next 'real dir' -> otherwise iterators might have been invalidated.\n     ++it;\n\n     // go up the current tree and erase all empty dirs (stop on first non-empty)\n     while (true) {\n         if(! 
dir->children.empty()){\n             // our dir has children, so do not erase it!\n             return it;\n         }\n         // our dir has no children, so it is safe for our parent to delete it\n         auto parentDirWeak = dir->parent;\n         if(is_uninitialized(parentDirWeak)){\n             // reached root /\n             return it;\n         }\n         auto parentDir = dir->parent.lock();\n\n         auto itOfDirInParent = parentDir->children.find(dir->name);\n         assert(itOfDirInParent != parentDir->children.end());\n         parentDir->children.erase(itOfDirInParent);\n         dir = parentDir;\n     }\n }\n\n//////////////////////////////////////////////////////////////////////////////////////////////\n\nPathTree::PathTree()\n{\n    commonConstructor();\n}\n\n\nvoid PathTree::commonConstructor()\n{\n    m_rootDir = std::make_shared<_Dir>(\"/\");\n    m_rootDirMapDummy = {{\"\", m_rootDir}};\n    m_rootNodeIsContained = false;\n}\n\nconst std::unordered_set<StrLight> &PathTree::allPaths() const\n{\n    return m_allPaths;\n}\n\n\nvoid PathTree::printDbg()\n{\n    if(m_rootDir->children.empty()){\n        std::cerr << __func__ << \" tree is empty\\n\";\n    } else {\n        printRec(m_rootDir);\n    }\n}\n\nvoid PathTree::clear()\n{\n    m_rootDir->children.clear();\n    m_rootDir->isEnd = false;\n    m_allPaths.clear();\n    m_orderedPathlenghts.clear();\n\n}\n\nbool PathTree::isEmpty() const\n{\n    return m_rootDir->children.empty();\n}\n\n\n\nvoid PathTree::insert(const StrLight &path){\n    assert( path.find(\"//\") == StrLight::npos);\n    auto currenDir = m_rootDir;\n    const char* cpath = path.c_str();\n    // ignore leading /\n    ++cpath;\n\n   StrLight filename;\n    while (  (cpath = nextFilename(cpath, filename)) != nullptr ) {\n        currenDir = mkDirIfNotExist(currenDir, filename);\n    }\n    currenDir->isEnd = true;\n\n    m_allPaths.insert(path.deepCopy());\n    if(std::find(m_orderedPathlenghts.begin(), 
m_orderedPathlenghts.end(), path.size()) ==\n             m_orderedPathlenghts.end()){\n        m_orderedPathlenghts.push_back(path.size());\n        std::sort( m_orderedPathlenghts.begin(), m_orderedPathlenghts.end());\n    }\n\n}\n\nbool PathTree::contains(const StrLight &path) const\n{\n    StrLight pathLight;\n    pathLight.setRawData(path.c_str(), path.size());\n    auto dir = findDir(pathLight);\n    if(dir == nullptr){\n        return false;\n    }\n    return dir->isEnd;\n}\n\n/// Check if path is a parent path of any other path within this\n/// tree. Example:\n/// /home/user/foo exists in this tree and it is queried for path\n/// /home/user -> true is returned\n///\n/// If allowEquals is true, true is also returned, if\n/// the searched path is contained but has no children (equals\n/// to the searched path).\nbool PathTree::isParentPath(const StrLight &path, bool allowEquals) const {\n    StrLight pathLight;\n    pathLight.setRawData(path.c_str(), path.size());\n    auto dir = findDir(pathLight);\n    if(dir == nullptr){\n        return false;\n    }\n\n    if(! dir->children.empty()){\n        return true;\n    }\n    // no children exist\n    return dir->isEnd && allowEquals;\n}\n\n\n/// @return true, if param path is subpath of any previously inserted paths\n/// or the same, if allowEquals=true\nbool PathTree::isSubPath(const StrLight &path, bool allowEquals) const {\n    // maybe_todo: continously calculate the hash (not always from beginning).\n    if(! m_orderedPathlenghts.empty() && m_orderedPathlenghts.front() == 1){\n        // We contain the root node. 
Any path is a subpath execpt /\n        assert(m_allPaths.find(\"/\") != m_allPaths.end());\n        if(allowEquals){\n            return  true;\n        }\n        return path != '/';\n    }\n    m_rawbuftmp.setRawData(path.constData(), path.size());\n    for(size_t s : m_orderedPathlenghts){\n        if(s < path.size()){\n            // If we didn't have a / at the next position, we would cut the\n            // path at a wrong position -> continue\n            if(path[s] != '/'){\n                continue;\n            }\n            // A candiate path with the same size exists. No need to check\n            // allowEquals, because the path continues\n            m_rawbuftmp.setRawSize(s);\n            if(m_allPaths.find(m_rawbuftmp) != m_allPaths.end()){\n                return true;\n            }\n        } else if( s > path.size()){\n            // m_orderedPathlenghts is ordered ascending -> the\n            // next paths will be even longer:\n            return false;\n        } else {\n            // s == path.size\n            // The next m_orderedPathlength will be greater, so we can only\n            // be a 'sub'-path, if allowEquals is true.\n            if( allowEquals){\n                m_rawbuftmp.setRawSize(s);\n                if(m_allPaths.find(m_rawbuftmp) != m_allPaths.end()){\n                    return true;\n                }\n            }\n            return false;\n        }\n    }\n    return false;\n}\n\nvoid PathTree::printRec(const PathTree::_DirPtr &node, const StrLight &dir) const\n{\n    for(const auto & n : node->children){\n        auto fullPath = dir + '/' + n->name;\n        printf(\"%s %.*s\\n\", __func__, int(fullPath.size()), fullPath.constData());\n        printRec(n, fullPath);\n    }\n}\n\n/// @return the new or existing dir\nPathTree::_DirPtr PathTree::mkDirIfNotExist(PathTree::_DirPtr &parent,\n                                            const StrLight &name)\n{\n    auto it = parent->children.find(name);\n    if(it 
== parent->children.end()){\n        auto name_copy = name.deepCopy();\n        auto newDir = std::make_shared<_Dir>(name_copy);\n        newDir->parent = parent;\n        parent->children[name_copy] = newDir;\n        return newDir;\n    }\n    return it.value();\n}\n\n\n/// @return The node exactly matching the passed path or nullptr\nPathTree::_DirPtr PathTree::findDir(const StrLight &path) const\n{\n    auto currentDir = m_rootDir;   \n    const char* cpath = path.constData();\n    // ignore leading /\n    ++cpath;\n    StrLight filename;\n    while (  (cpath = nextFilename(cpath, filename)) != nullptr ) {\n        auto it = currentDir->children.find(filename);\n        if(it == currentDir->children.end()){\n            return nullptr;\n        }\n        currentDir = it.value();\n    }\n    return currentDir;\n}\n\n/*\nvoid PathTree::recursiveCopy(_DirPtr& dst, const _DirPtr& src)\n{\n    dst->isEnd = src->isEnd;\n    dst->name = src->name;\n    dst->children.reserve(src->children.size());\n    for(auto& subSrc  : src->children){\n        auto newDir = std::make_shared<_Dir>();\n        dst->children[subSrc.first] = newDir;\n        newDir->parent = dst;\n        recursiveCopy(newDir, subSrc.second);\n    }\n}\n*/\n\nvoid PathTree::recursiveClear(PathTree::_DirPtr &dir)\n{\n    for(auto& sub  : dir->children){\n        recursiveClear(sub);\n    }\n    dir->children.clear();\n}\n\n\n\n\n"
  },
  {
    "path": "src/common/pathtree.h",
    "content": "#pragma once\n\n#include <vector>\n#include <memory>\n#include <unordered_set>\n#include <QHash>\n\n\n#include \"strlight.h\"\n\n\n/// Add a set of absolute file paths and\n/// later check, if a given path is a sub-\n/// or parent path of one of those.\n/// No filesystem-activity involved!\n/// Please make sure the paths are clean beforehand\n/// ( no //, no traling /, no relative paths ../../ etc.)\nclass PathTree\n{\nprivate:\n    struct _Dir;\n    typedef std::shared_ptr<_Dir> _DirPtr;\n    typedef QHash<StrLight, _DirPtr > _DirMap;\n\n    struct _Dir {\n        _Dir(const StrLight& name) :\n            isEnd(false),\n            name(name){}\n\n        _DirMap children;\n        std::weak_ptr<_Dir> parent; // break reference cycles !\n        bool isEnd;\n        StrLight name;\n    };\n\npublic:\n\n    class iterator\n    {\n    public:\n        bool operator==(const iterator& rhs) const;\n        bool operator!=(const iterator& rhs) const;\n\n        iterator& operator++ ();\n        StrLight & operator*() { return d->currentPath; }\n\n    private:\n        struct CurrentDirInfo {\n            _DirMap::const_iterator it; // current position\n            _DirMap::const_iterator end;\n            size_t sizeDirName; // putting brace {} here makes compilation fail. 
Why?\n        };\n\n        typedef std::vector<CurrentDirInfo> DirStack;\n\n        iterator(_DirMap::const_iterator begin, _DirMap::const_iterator end,\n                 const StrLight &path);\n        iterator();\n\n        void nextDir();\n        bool cdSubDirIfExist(_DirMap::iterator begin, _DirMap::iterator end);\n        void nextEntryInParentDirs();\n        bool nextSiblingIfExist(CurrentDirInfo& dirInfo);\n\n        void appendPath(const StrLight& dirname);\n        void stripPath(size_t lastDirSize);\n\n        struct PrivateData{\n            DirStack dirStack; // last element always points to the current\n                               // dir of iteration\n            StrLight currentPath;\n        };\n        std::shared_ptr<PrivateData> d;\n\n        friend class PathTree;\n    };\n\npublic:\n\n    const iterator begin() const;\n    const iterator end() const ;\n\n    iterator iter(const StrLight& path) const;\n    iterator subpathIter(const StrLight& path) const;\n    iterator erase(iterator it);\n\npublic:\n    PathTree();\n    ~PathTree() = default;\n    PathTree(const PathTree&) = delete ;\n    PathTree& operator=( const PathTree& ) = delete ;\n\n    void clear();\n    bool isEmpty() const;\n\n    void insert(const StrLight& path);\n    template<typename Iterator>\n    void insert(Iterator first, Iterator last);\n\n    bool contains(const StrLight & path) const;\n\n    bool isParentPath(const StrLight &path, bool allowEquals=false) const ;\n\n    bool isSubPath(const StrLight &path, bool allowEquals=false) const;\n\n    void printDbg();\n\n\n    const std::unordered_set<StrLight>& allPaths() const;\n\nprivate:\n    static const char sep = '/';\n\n    void commonConstructor();\n\n    _DirPtr m_rootDir;\n    _DirMap m_rootDirMapDummy;\n    mutable StrLight m_rawbuftmp;\n    std::unordered_set<StrLight> m_allPaths;\n    std::vector<size_t> m_orderedPathlenghts;\n    bool m_rootNodeIsContained;\n\n\n    void printRec(const _DirPtr &node, const 
StrLight &dir=\"\") const;\n\n    _DirPtr mkDirIfNotExist(_DirPtr& parent, const StrLight &name);\n\n    _DirPtr findDir(const StrLight &path) const;\n\n    // static void recursiveCopy(_DirPtr &dst, const _DirPtr &src);\n    static void recursiveClear(_DirPtr &dir);\n};\n\n\ntemplate<typename Iterator>\nvoid PathTree::insert(Iterator first, Iterator last)\n{\n    for(Iterator it=first; it != last; ++it) {\n        this->insert(*it);\n    }\n}\n\n\n\n"
  },
  {
    "path": "src/common/pidcontrol.cpp",
    "content": "\n#include <unistd.h>\n#include <fstream>\n#include <memory.h>\n\n#include <iostream>\n#include <fstream>\n#include <string>\n#include <sstream>\n#include <fcntl.h>\n\n#include \"pidcontrol.h\"\n#include \"logger.h\"\n#include \"os.h\"\n#include \"osutil.h\"\n\n\n/// returns an empty string, if opening fails\n/// (or the file belonging to pid was empty)\nstd::string pidcontrol::parseCmdlineOfPID(pid_t pid)\n{\n    std::string cmdline;\n\n    const std::string pathToPid = \"/proc/\" + std::to_string(pid) + \"/cmdline\";\n    std::ifstream f;\n    f.open(pathToPid, std::fstream::in);\n    if(f.is_open() ) {\n        // recombine the string-set to one string. From man proc:\n        // The command-line arguments appear\n        // in this file as a set of strings separated by null bytes\n        // ('\\0'), with a further null byte after the last string.\n        char ch;\n        cmdline.reserve(128);\n        bool previousChWasBSlash0 = false;\n        while (f >> std::noskipws >> ch) {\n            if(ch == '\\0'){\n                if(previousChWasBSlash0){\n                    break;\n                }\n                cmdline.push_back(' ');\n                previousChWasBSlash0 = true;\n            } else {\n                cmdline.push_back(ch);\n                previousChWasBSlash0 = false;\n            }\n        }\n        if(! cmdline.empty()){\n            cmdline.pop_back();\n        }\n    }\n    return cmdline;\n}\n\n\n\n\n\n/// Read the status file at /proc/$PID/status and return\n/// the real user id found in it (but check for null).\n/// See also man 5 proc.\n/// @param procDirFd: *must* be an open directory descriptor\n/// at /proc/$pid\nNullableValue<uid_t>\npidcontrol::parseRealUidOf(int procDirFd){\n    std::string uid = osutil::parseGenericKeyValFile(procDirFd, \"status\", \"Uid:\");\n    if(uid.empty()){\n        return {};\n    }\n    return {qVariantTo_throw<uid_t>(QByteArray::fromStdString(uid))};\n}\n\n\n\n\n\n\n"
  },
  {
    "path": "src/common/pidcontrol.h",
    "content": "#pragma once\n\n#include \"nullable_value.h\"\n\n#include <string>\n\nnamespace pidcontrol {\n\n    std::string parseCmdlineOfPID(pid_t pid);\n\n    NullableValue<uid_t> parseRealUidOf(int procDirFd);\n\n} // namespace pidcontrol\n\n\n\n"
  },
  {
    "path": "src/common/qfddummydevice.cpp",
    "content": "#include <iostream>\n\n#include \"qfddummydevice.h\"\n\n#include \"os.h\"\n\n/// @param becomeOwner: if true, close the fd in destructor\nQFdDummyDevice::QFdDummyDevice(int fd, bool becomeOwner) :\n    m_fd(fd),\n    m_owner(becomeOwner)\n{}\n\nQFdDummyDevice::~QFdDummyDevice()\n{\n    if(m_owner){\n        try {\n            os::close(m_fd);\n        } catch (const os::ExcOs& e) {\n            std::cerr << __func__ << \" \" << e.what() << \"\\n\";\n        }\n    }\n}\n\n\nqint64 QFdDummyDevice::readData(char *data, qint64 maxlen) {\n    return os::read(m_fd, data,static_cast<size_t>(maxlen));\n}\n\nqint64 QFdDummyDevice::writeData(const char *data, qint64 len) {\n    return os::write(m_fd, data, static_cast<size_t>(len));\n}\n"
  },
  {
    "path": "src/common/qfddummydevice.h",
    "content": "#pragma once\n\n#include <QIODevice>\n\n\n/// Dummy wrapper because QFile::open(int fd,...) cannot handle an already\n/// open fd...\nclass QFdDummyDevice : public QIODevice\n{\npublic:\n    QFdDummyDevice(int fd, bool becomeOwner=false);\n    ~QFdDummyDevice() override;\n\npublic:\n    QFdDummyDevice(const QFdDummyDevice&) = delete;\n    void operator=(const QFdDummyDevice&) = delete;\n\nprotected:\n    int m_fd;\n    bool m_owner;\n\n    qint64 readData(char *data, qint64 maxlen) override;\n    qint64 writeData(const char *data, qint64 len) override;\n};\n"
  },
  {
    "path": "src/common/qfilethrow.cpp",
    "content": "#include \"qfilethrow.h\"\n\n#include \"util.h\"\n\n/// @throws QExcIo\nvoid QFileThrow::flush()\n{\n    if(! QFile::flush()){\n        throw QExcIo(qtr(\"Failed to flush %1: %2\")\n                     .arg(this->fileName(), this->errorString()));\n    }\n}\n\nbool QFileThrow::open(QIODevice::OpenMode flags)\n{\n    if(! QFile::open(flags)){\n        throw QExcIo(qtr(\"Failed to open %1: %2\")\n                     .arg(this->fileName(), this->errorString()));\n    }\n    return true;\n}\n\nbool QFileThrow::open(FILE *f, QIODevice::OpenMode ioFlags, QFileDevice::FileHandleFlags handleFlags)\n{\n    if(! QFile::open(f, ioFlags, handleFlags)){\n        throw QExcIo(qtr(\"Failed to open file: %1\")\n                     .arg(this->errorString()));\n    }\n    return true;\n}\n\nbool QFileThrow::open(int fd, QIODevice::OpenMode ioFlags, QFileDevice::FileHandleFlags handleFlags)\n{\n    if(! QFile::open(fd, ioFlags, handleFlags)){\n        throw QExcIo(qtr(\"Failed to open fd %1: %2\")\n                     .arg(fd).arg(this->errorString()));\n    }\n    return true;\n}\n\n/// @throws QExcIo\n/// @return: *always* true, only bool because of 'override'\nbool QFileThrow::seek(qint64 offset)\n{\n    if(! 
QFile::seek(offset)){\n        throw QExcIo(qtr(\"Failed to seek %1: %2\")\n                     .arg(this->fileName(), this->errorString()));\n    }\n    return true;\n}\n\nqint64 QFileThrow::readData(char *data, qint64 maxSize){\n    auto ret = QFile::readData(data, maxSize);\n    if(ret == -1){\n        throw QExcIo(qtr(\"Failed to read from file %1: %2\")\n                     .arg(this->fileName(), this->errorString()));\n    }\n    return ret;\n}\n\nqint64 QFileThrow::readLineData(char *data, qint64 maxlen){\n    auto ret = QFile::readLineData(data, maxlen);\n    if(ret == -1){\n        throw QExcIo(qtr(\"Failed to readLine from file %1: %2\")\n                     .arg(this->fileName(), this->errorString()));\n    }\n    return ret;\n}\n\n/// @throws QExcIo\nqint64 QFileThrow::writeData(const char *data, qint64 len)\n{\n    auto bytesWritten = QFile::writeData(data, len);\n    if(bytesWritten == -1){\n        throw QExcIo(qtr(\"Failed to write to file %1: %2\")\n                     .arg(this->fileName(), this->errorString()));\n    }\n    if( bytesWritten != len){\n        throw QExcIo(qtr(\"Unexpected written size for file %1 - \"\n                         \"expected %2, actual: %3\")\n                     .arg(this->fileName()).arg(len).arg(bytesWritten));\n    }\n    return bytesWritten;\n}\n"
  },
  {
    "path": "src/common/qfilethrow.h",
    "content": "#pragma once\n\n#include <QFile>\n\nclass QFileThrow : public QFile\n{\npublic:\n    using QFile::QFile;\n\n    void flush();\n\n    bool open(QFile::OpenMode flags) override;\n    bool open(FILE *f, OpenMode ioFlags, FileHandleFlags handleFlags=DontCloseHandle);\n    bool open(int fd, OpenMode ioFlags, FileHandleFlags handleFlags=DontCloseHandle);\n\n    bool seek(qint64 offset) override;\n\n    qint64 readData(char *data, qint64 maxlen) override;\n    qint64 readLineData(char *data, qint64 maxlen) override;\n    qint64 writeData(const char *data, qint64 len) override;\n\n};\n"
  },
  {
    "path": "src/common/qoptargparse/CMakeLists.txt",
    "content": "\nadd_library(lib_qoptargparse\n    excoptargparse.cpp\n    qoptargparse.cpp\n    qoptarg.cpp\n    qoptsqlarg.cpp\n    qoptvarlenarg.cpp\n    qoptargtrigger.cpp\n )\n\ntarget_link_libraries(lib_qoptargparse PUBLIC\n    Qt5::Core\n    lib_util\n)\n"
  },
  {
    "path": "src/common/qoptargparse/excoptargparse.cpp",
    "content": "#include \"excoptargparse.h\"\n\n\n\nExcOptArgParse::ExcOptArgParse(const QString &text)\n    : QExcCommon(text, false)\n{\n\n}\n"
  },
  {
    "path": "src/common/qoptargparse/excoptargparse.h",
    "content": "#pragma once\n\n#include \"exccommon.h\"\n\nclass ExcOptArgParse : public QExcCommon\n{\npublic:\n    ExcOptArgParse(const QString & text);\n};\n\n\n\n"
  },
  {
    "path": "src/common/qoptargparse/qoptarg.cpp",
    "content": "\n#include <cassert>\n\n#include <utility>\n\n#include \"qoptarg.h\"\n#include \"compat.h\"\n#include \"exccommon.h\"\n#include \"excoptargparse.h\"\n#include \"util.h\"\n#include \"conversions.h\"\n\n\n/// @param shortName short name, one minus is added to the front (-e)\n/// @param name long name, two minus signs are added to the front (--exec)\n/// @param description\n/// @param hasValue --verbose might be a flag, --size 2 has the value 2.\n///\nQOptArg::QOptArg(const QString &shortName,\n                 const QString &name,\n                 QString description,\n                 bool hasValue) :\n    m_name(\"--\" + name),\n    m_description(std::move(description)),\n    m_hasValue(hasValue),\n    m_argIdx(-1),\n    m_internalOnly(false),\n    m_isFinalizeFlag(false),\n    m_isByteSizeArg(false),\n    m_isRelativeDateTime(false),\n    m_relativeDateTimeSubtract(false)\n{\n    if(name.isEmpty()){\n        throw QExcIllegalArgument(\"argname must not be empty\");\n    }\n    if(name.startsWith('-')){\n        throw QExcProgramming(\"please pass names without leading minus\");\n    }\n\n    if(! 
shortName.isEmpty()){\n        if(shortName.startsWith('-')){\n            throw QExcProgramming(\"please pass short names without leading minus\");\n        }\n        m_shortName = '-' + shortName;\n    }\n\n}\n\n\n/// @param optTrigger\n/// @param defaultTriggerStr Trigger string which shall be used,\n///                          in case no trigger is entered (by the user)\n///\nQOptArg::QOptArg(const QString &shortName,\n                 const QString &name,\n                 const QString &description,\n                 const QOptArgTrigger &optTrigger,\n                 const QString& defaultTriggerStr) :\n    QOptArg(shortName, name, description, true)\n{\n    m_optTrigger = optTrigger;\n    m_defaultTriggerStr = defaultTriggerStr;\n}\n\n\nconst QString &QOptArg::shortName() const\n{\n    return m_shortName;\n}\n\nconst QString& QOptArg::name() const\n{\n    return m_name;\n}\n\nQString QOptArg::description() const\n{\n    return m_description  ;\n}\n\n\nbool QOptArg::hasValue() const\n{\n    return m_hasValue;\n}\n\n\nbool QOptArg::wasParsed() const\n{\n    return m_argIdx != -1;\n}\n\n\nconst QOptArgTrigger &QOptArg::optTrigger() const\n{\n    return m_optTrigger;\n}\n\n/// Meant to be overidden by subclasses.\n/// Called right before a potential trigger word is further\n/// processed.\nQString QOptArg::preprocessTrigger(const char *str) const\n{\n    return str;\n}\n\nconst QString &QOptArg::parsedTrigger() const\n{\n    return m_parsedTrigger;\n}\n\nvoid QOptArg::setParsedTrigger(const QString &parsedTrigger)\n{\n    m_parsedTrigger = parsedTrigger;\n}\n\n/// See also: setAllowedOptions()\n/// @param maxCount: throw, in case more options than maxCount were parsed.\nQStringList QOptArg::getOptions(int maxCount) const\n{\n    if(m_allowedOptions.empty()){\n        throw QExcProgramming(QString(\"%1 called without previous setAllowedOptions\")\n                               .arg(__func__));\n    }\n    if(! 
m_hasValue){\n        throwgetValueCalledOnFlag(__func__);\n    }\n\n    QStringList valList;\n    for(int i=0; i < m_vals.len; i++){\n        QStringList newVals = QString(m_vals.argv[i])\n                .split(m_allowedOptionsDelimeter, Qt::SkipEmptyParts);\n        for(const QString& str : newVals){\n            if(m_allowedOptions.find(str) == m_allowedOptions.end()){\n                throw ExcOptArgParse(qtr(\"'%1' is not a supported option for '%2'. \")\n                                     .arg(str, m_name));\n            }\n        }\n        valList += newVals;\n        if(valList.size() > maxCount){\n            throw ExcOptArgParse(qtr(\"Only %1 option(s) allowed for argument %2\")\n                                 .arg(maxCount).arg(m_name));\n        }\n    }\n    return valList;\n}\n\n/// Note: this argument must have been marked as 'bytesize' beforehand\nQVariantList QOptArg::getVariantByteSizes(const QVariantList &defaultValues)\n{\n    assert(m_isByteSizeArg);\n    auto sizeStrs = getVariantValues<QString>(defaultValues);\n    QVariantList sizes;\n    Conversions userStrConv;\n    for(const auto& s : sizeStrs){\n        try {\n            sizes.push_back(userStrConv.bytesFromHuman(s.toString()));\n        } catch (const ExcConversion& e) {\n            throw ExcOptArgParse(e.descrip() + \" (arg \" + m_name + ')' );\n        }\n    }\n    return sizes;\n}\n\nQVariantList QOptArg::getVariantRelativeDateTimes(const QVariantList &defaultValues)\n{\n    assert(m_isRelativeDateTime);\n    auto dateTimeStrs = getVariantValues<QString>(defaultValues);\n    QVariantList dateTimes;\n    Conversions userStrConv;\n    for(const auto& s : dateTimeStrs){\n        try {\n            dateTimes.push_back(userStrConv.relativeDateTimeFromHuman(s.toString(),\n                                                                      m_relativeDateTimeSubtract));\n        } catch (const ExcConversion& e) {\n            throw ExcOptArgParse(e.descrip() + \" (arg \" + m_name 
+ ')' );\n        }\n    }\n    return dateTimes;\n}\n\n\n\n/// See also: getOptions(), where the check is performed lazily.\nvoid QOptArg::setAllowedOptions(const std::unordered_set<QString> &options,\n                                const QString &delimeter)\n{\n    m_allowedOptions = options;\n    if(delimeter.isEmpty()){\n        throw QExcIllegalArgument(QString(\"%1: empty delimeter passed.\").arg(__func__));\n    }\n    m_allowedOptionsDelimeter = delimeter;\n}\n\nconst QOptArg::RawValues_t &QOptArg::vals() const\n{\n    return m_vals;\n}\n\nvoid QOptArg::setVals(const RawValues_t &vals)\n{\n    m_vals = vals;\n}\n\nint QOptArg::argIdx() const\n{\n    return m_argIdx;\n}\n\nvoid QOptArg::setArgIdx(int argIdx)\n{\n    m_argIdx = argIdx;\n}\n\nconst QString &QOptArg::defaultTriggerStr() const\n{\n    return m_defaultTriggerStr;\n}\n\n/// see setter\nbool QOptArg::internalOnly() const\n{\n    return m_internalOnly;\n}\n\n/// An internal argument is not displayed in the help\nvoid QOptArg::setInternalOnly(bool internalOnly)\n{\n    m_internalOnly = internalOnly;\n}\n\n/// If *this* argument is parsed, param arg must be parsed\n/// as well.\nvoid QOptArg::addRequiredArg(const QOptArg *arg)\n{\n    assert(arg->name() != this->name());\n    m_requiredArs.append(arg);\n}\n\nconst QVector<const QOptArg *>& QOptArg::requiredArs() const\n{\n    return m_requiredArs;\n}\n\nvoid QOptArg::throwgetValueCalledOnFlag(const char *functionname) const\n{\n    throw QExcProgramming(QString(\"%1() was called although argument %2 \"\n                                  \"was marked as flag (no value)\").arg(functionname, m_name));\n}\n\n/// @param subtractIt: if true, the parsed date is subtracted from current one,\n/// else it is added.\nvoid QOptArg::setIsRelativeDateTime(bool isRelativeDateTime, bool subtractIt)\n{\n    m_isRelativeDateTime = isRelativeDateTime;\n    m_relativeDateTimeSubtract = subtractIt;\n    m_description += qtr(\" Supported units include %1\")\n              
            .arg(Conversions::relativeDateTimeUnitDescriptions());\n}\n\n\n/// adds a description, that this argument also accepts bytesizes after given\n/// numbers lik KiB, MiB, etc.\nvoid QOptArg::setIsByteSizeArg(bool isByteSizeArg)\n{\n    m_isByteSizeArg = isByteSizeArg;\n    m_description += qtr(\" You may provide a unit such as KiB, MiB, etc..\");\n}\n\nbool QOptArg::isFinalizeFlag() const\n{\n    return m_isFinalizeFlag;\n}\n\n/// Currently only supported for flags (arguments without values).\n/// If true, the parser will stop processing args, if argument is passed.\n/// This can be used to delegate parsing control to a 'sub-parser'.\n/// Therefor, if finalize is true, an exception will be thrown, if no furhter\n/// arguments are available after the respective flag.\n/// Default is false.\nvoid QOptArg::setFinalizeFlag(bool f)\n{\n    if(m_hasValue){\n        throw QExcProgramming(\"Finalize flag currently only supported for \"\n                              \"flags (arguments without value)\");\n    }\n    m_isFinalizeFlag = f;\n}\n\nconst QString &QOptArg::allowedOptionsDelimeter() const\n{\n    return m_allowedOptionsDelimeter;\n}\n\nconst std::unordered_set<QString>& QOptArg::allowedOptions() const\n{\n    return m_allowedOptions;\n}\n\n\n\n\n\n\n\n\n\n"
  },
  {
    "path": "src/common/qoptargparse/qoptarg.h",
    "content": "#pragma once\n\n#include <QString>\n#include <QVector>\n#include <unordered_set>\n\n#include \"compat.h\"\n#include \"util.h\"\n#include \"qoptargtrigger.h\"\n#include \"excoptargparse.h\"\n\nclass QOptArg {\npublic:\n    struct RawValues_t{\n        RawValues_t() : argv(nullptr), len(0) {}\n        char** argv;\n        int len;\n    };\n\n    QOptArg(const QString& shortName, const QString & name,\n            QString  description,\n            bool hasValue=true );\n\n    QOptArg(const QString& shortName, const QString & name,\n            const QString& description,\n            const QOptArgTrigger & optTrigger, const QString& defaultTriggerStr);\n\n    virtual ~QOptArg() = default;\n\n    const QString& shortName() const;\n\n    const QString& name() const;\n\n    virtual QString description() const;\n\n    bool hasValue() const;\n\n    // after parse:\n    bool wasParsed() const;\n\n    const QOptArgTrigger& optTrigger() const;\n    virtual QString preprocessTrigger(const char* str) const;\n\n    const QString& parsedTrigger() const;\n    virtual void setParsedTrigger(const QString &parsedTrigger);\n\n\n    template <typename T>\n    T getValue(const T& defaultValue=T()) const;\n\n    template <typename ContainerT>\n    ContainerT getValues(const ContainerT& defaultValues={});\n\n    template <typename ContainerT>\n    ContainerT getValuesByDelim(const QString& delim=\",\", const ContainerT& defaultValues={},\n                                const int minValueSize=1,\n                                const int maxValueSize=std::numeric_limits<int>::max());\n\n\n    template <typename T>\n    QVariantList getVariantValues(const QVariantList& defaultValues={});\n\n    QStringList getOptions(int maxCount=std::numeric_limits<int>::max()) const;\n    QVariantList getVariantByteSizes(const QVariantList& defaultValues={});\n    QVariantList getVariantRelativeDateTimes(const QVariantList& defaultValues={});\n\n    void setAllowedOptions(const 
std::unordered_set<QString>& options,\n                           const QString&delimeter=\",\");\n    const std::unordered_set<QString>& allowedOptions() const;\n    const QString& allowedOptionsDelimeter() const;\n\n\n    const RawValues_t& vals() const;\n    void setVals(const RawValues_t &vals);\n\n    int argIdx() const;\n    void setArgIdx(int argIdx);\n\n    const QString& defaultTriggerStr() const;\n\n    bool internalOnly() const;\n    void setInternalOnly(bool internalOnly);\n\n    void addRequiredArg(const QOptArg* arg);\n\n    const QVector<const QOptArg *>& requiredArs() const;\n\n    bool isFinalizeFlag() const;\n    void setFinalizeFlag(bool f);\n\n    void setIsByteSizeArg(bool isByteSizeArg);\n\n    void setIsRelativeDateTime(bool isRelativeDateTime, bool subtractIt);\n\nprotected:\n    [[noreturn]]\n    void throwgetValueCalledOnFlag(const char* functionname) const;\n\n    QString m_shortName;\n    QString m_name;\n    QString m_description;\n    bool m_hasValue;\n    QOptArgTrigger m_optTrigger;\n    QString m_defaultTriggerStr;\n    int m_argIdx;\n    bool m_internalOnly;\n\n    RawValues_t m_vals;\n    QVector<const QOptArg*> m_requiredArs;\n\n    // after parse:\n    QString m_parsedTrigger;\n    std::unordered_set<QString> m_allowedOptions;\n    QString m_allowedOptionsDelimeter;\n    bool m_isFinalizeFlag;\n    bool m_isByteSizeArg;\n    bool m_isRelativeDateTime;\n    bool m_relativeDateTimeSubtract;\n};\n\n\n\n/// Get the first value and try to convert it\n/// to the target type (throws on error). If the value\n/// is empty (not parsed), the default one is returned.\n/// @throws ExcCfg\ntemplate <typename T>\nT QOptArg::getValue(const T& defaultValue) const{\n    if(! 
m_hasValue){\n        throwgetValueCalledOnFlag(__func__);\n    }\n\n    if(m_vals.len == 0){\n        return defaultValue;\n    }\n    T t;\n    try {\n        qVariantTo_throw(m_vals.argv[0], &t, false);\n    } catch (const ExcQVariantConvert& ex) {\n        throw ExcOptArgParse(ex.descrip() + \" (arg \" + m_name + ')' );\n    }\n    return t;\n}\n\n\n/// Try to convert all values\n/// to the target type (throws on error). If the values\n/// are empty (not parsed), the default ones are returned.\n/// @throws ExcCfg\ntemplate <typename ContainerT>\nContainerT QOptArg::getValues(const ContainerT& defaultValues){\n    if(! m_hasValue){\n        throwgetValueCalledOnFlag(__func__);\n    }\n    if(m_vals.len == 0){\n        return defaultValues;\n    }\n    ContainerT container;\n    for(int i=0; i < m_vals.len; i++){\n        typename ContainerT::value_type t;\n        try {\n            qVariantTo_throw(m_vals.argv[i], &t, false);\n        } catch (const ExcQVariantConvert& ex) {\n            throw ExcOptArgParse(ex.descrip() + \" (arg \" + m_name + ')' );\n        }\n        container.push_back(t);\n    }\n    return container;\n\n}\n\n/// for a *single* argument string, whose values are separated by a delimter (e.g. comma)\n/// argFoo 1,2,3\ntemplate <typename ContainerT>\nContainerT QOptArg::getValuesByDelim(const QString& delim, const ContainerT& defaultValues,\n                                     const int minValueSize, const int maxValueSize){\n    if(! 
m_hasValue){\n        throwgetValueCalledOnFlag(__func__);\n    }\n    if(m_vals.len == 0){\n        return defaultValues;\n    }\n    ContainerT container;\n    const auto splittedVals = QString(m_vals.argv[0]).split(delim, Qt::SkipEmptyParts);\n    if(splittedVals.size() < minValueSize || splittedVals.size() > maxValueSize){\n         throw ExcOptArgParse(qtr(\"argument %1 requires at least %2 and at most %3 \"\n                                  \"parameters, separated by '%4' but %5 were given.\")\n                                    .arg(m_name).arg(minValueSize).arg(maxValueSize)\n                                    .arg(delim).arg(splittedVals.size()));\n    }\n    for(const QString & val : splittedVals){\n        typename ContainerT::value_type t;\n        try {\n            qVariantTo_throw(val, &t, false);\n        } catch (const ExcQVariantConvert& ex) {\n            throw ExcOptArgParse(ex.descrip() + \" (arg \" + m_name + ')' );\n        }\n        container.push_back(t);\n    }\n    return container;\n}\n\n\n/// Same as getValues(), but returns a QVariantList.\n/// The template parameter is there to convert the values into\n/// the target type right here.\ntemplate<typename T>\nQVariantList QOptArg::getVariantValues(const QVariantList& defaultValues)\n{\n    if(! m_hasValue){\n        throwgetValueCalledOnFlag(__func__);\n    }\n    if(m_vals.len == 0){\n        return defaultValues;\n    }\n    QVariantList l;\n    for(int i=0; i < m_vals.len; i++){\n        T t;\n        try {\n            qVariantTo_throw(m_vals.argv[i], &t, false);\n        } catch (const ExcQVariantConvert& ex) {\n            throw ExcOptArgParse(ex.descrip() + \" (arg \" + m_name + ')' );\n        }\n        l.push_back(t);\n    }\n    return l;\n}\n\n\n"
  },
  {
    "path": "src/common/qoptargparse/qoptargparse.cpp",
    "content": "\n#include <QDebug>\n#include <cassert>\n#include <unistd.h>\n#include <sys/ioctl.h>\n\n\n#include \"qoptargparse.h\"\n#include \"qoptsqlarg.h\"\n#include \"qoptvarlenarg.h\"\n#include \"excoptargparse.h\"\n#include \"cleanupresource.h\"\n#include \"qoutstream.h\"\n#include \"cpp_exit.h\"\n#include \"qformattedstream.h\"\n\nusing RawValues_t = QOptArg::RawValues_t;\n\nnamespace {\n\n/// Consume the next arguments according to the found trigger (if any).\n/// Store the found values in arg\nvoid consumeOptArgs(int argc, char *argv[], int& i, QOptArg& arg){\n\n    auto & optionalTrigger = arg.optTrigger();\n    QString preprocessedTrigger = arg.preprocessTrigger(argv[i]);\n\n    auto foundTrigger = optionalTrigger.trigger().find(preprocessedTrigger);\n    if(foundTrigger != optionalTrigger.trigger().end()) {\n        arg.setParsedTrigger(preprocessedTrigger);\n        // trigger given and consumed -> head to next arg\n        ++i;\n    } else {\n        // The trigger is optional, probably it was leaved out (or mistyped).\n        // In that case, the default trigger is used.\n        // Note that the default trigger *must* be part of the allowed\n        // trigger-set.\n        foundTrigger = optionalTrigger.trigger().find(arg.defaultTriggerStr());\n        assert(foundTrigger != optionalTrigger.trigger().end());\n    }\n\n    // Depending on the trigger a different count of values can be consumed\n    RawValues_t v;\n    v.argv = &argv[i];\n\n    for(v.len = 0; v.len < foundTrigger.value(); v.len++){\n        if(i >= argc){\n            // Out of args - delegate it to business logic\n            break;\n        }\n        // This parser does *not* support empty commandline arguments.\n        if(argv[i][0] == '\\0'){\n            throw ExcOptArgParse(qtr(\"%1 has an empty value\").arg(arg.name()));\n        }\n        ++i;\n    }\n\n    arg.setVals(v);\n}\n\n\n/// A var-len argument starts with the count of following arguments, which shall be 
consumed\nvoid consumeVarLenArg(int argc, char *argv[], int& i, QOptVarLenArg* arg){\n    const char* nArgStr = argv[i];\n    int nArgs;\n    try {\n        qVariantTo_throw(nArgStr, &nArgs, false);\n    } catch (const ExcQVariantConvert&) {\n        throw ExcOptArgParse(qtr(\"The argument %1 expects an integer as first value \"\n                                 \"but %2 was given\").arg(arg->name(), nArgStr));\n    }\n    // increment for nArgStr and all following values\n    RawValues_t v;\n    v.argv = &argv[++i];\n    v.len = nArgs;\n\n    i += nArgs;\n    if(i > argc){\n         throw ExcOptArgParse(qtr(\"%1: too few arguments left (%2 required)\")\n                              .arg(arg->name()).arg(nArgs));\n    }\n    arg->setVals(v);\n}\n\n} // namespace\n\n\n\n\nQOptArgParse::QOptArgParse() = default;\n\n\nvoid QOptArgParse::addArg(QOptArg *arg)\n{\n    assert(m_args.find(arg->name()) == m_args.end());\n\n    m_args.insert({arg->name(), arg});\n    if(! arg->shortName().isEmpty()){\n        // short names are optional in which case they are empty\n        m_argsShort.insert({arg->shortName(), arg});\n    }\n}\n\n/// Parse the commandline for all previously added arguments (and for\n/// -h, --help, after which the application EXITS).\n/// Parsing starts at argv[0], so in case it was received from the parent process,\n/// rather increment it first (++argv; argc--;)\n/// @throws ExcOptArgParse\nvoid QOptArgParse::parse(int argc, char *argv[])\n{\n    // to know which args we got, create a copy and delete\n    // elements on match\n    auto argsCopy = m_args;\n\n    if(argc > 0){\n        QByteArray first(argv[0]);\n        if(first == \"-h\" || first == \"--help\"){\n            printHelp();\n            cpp_exit(0);\n        }\n    }\n    // generate the vector here and not in addArg to allow\n    // for adding requirements *after* an argument was added\n    // to the parser\n    QVector<const QOptArg*> argsWithRequirements;\n\n    for(int i=0; i < argc; 
){\n        tsl::ordered_map<QString, QOptArg*>::iterator argIter;\n        const QString argStr = argv[i];\n\n        if(argStr.startsWith(\"--\")){\n            // search in long names\n            argIter = argsCopy.find(argStr);\n        } else {\n            // search in short names but then try to refind it in argsCopy (long names),\n            // to know, which args we got\n            argIter = m_argsShort.find(argStr);\n            if(argIter == m_argsShort.end()){\n                argIter = argsCopy.end();\n            } else {\n                argIter = argsCopy.find(argIter.value()->name());\n            }\n        }\n\n        if(argIter == argsCopy.end()){\n            if(m_args.find(argStr) != m_args.end()){\n                // maybe_todo: add mulit arg, if required and perform dynamic_cast to that\n                // subclass\n                throw ExcOptArgParse(argStr + qtr(\" was passed multiple times\"));\n            }\n            // We are done. Store rest-ptr\n            m_rest.argv = &argv[i];\n            m_rest.len = argc - i;\n\n            break;\n        }\n        // remember that we got this arg\n        auto deleteArgLater = finally([&argsCopy, &argIter] {\n            argsCopy.erase(argIter);\n        });\n\n        QOptArg* arg = argIter.value();\n        arg->setArgIdx(i);\n        if(! arg->requiredArs().isEmpty()){\n            argsWithRequirements.push_back(arg);\n        }\n\n        if(! arg->hasValue()){\n            // a simple flag\n            if(arg->isFinalizeFlag()){\n                // We are done. 
Store rest-ptr\n                ++i;\n                m_rest.argv = &argv[i];\n                m_rest.len = argc - i;\n                if(m_rest.len == 0){\n                    throw ExcOptArgParse(qtr(\"'%1' passed without further arguments\").arg(arg->name()));\n                }\n                break;\n            }\n            ++i;\n            continue;\n        }\n        if(++i >= argc){\n            throw ExcOptArgParse(qtr(\"Missing value for %1\").arg(arg->name()));\n        }\n        if(argv[i][0] == '\\0'){\n            // This parser does *not* support empty commandline arguments.\n            throw ExcOptArgParse(qtr(\"%1 has an empty value\").arg(arg->name()));\n        }\n        auto* varLenArg = dynamic_cast<QOptVarLenArg*>(arg);\n        // each of below cases has to point i to the next argument to be parsed!\n        if(varLenArg != nullptr){\n            consumeVarLenArg(argc, argv, i, varLenArg);\n        } else if(! arg->optTrigger().isEmpty()){\n            // special \"feature\" of this parser: consume the next argument(s) according\n            // to the given, xor to the default trigger word.\n            consumeOptArgs(argc, argv, i, *arg);\n        } else {\n            RawValues_t v;\n            v.argv = &argv[i];\n            v.len = 1;\n            arg->setVals(v);\n            ++i;\n        }\n\n    }\n\n    for(const QOptArg* argWithReq : argsWithRequirements){\n        for(const QOptArg* requiremnt : argWithReq->requiredArs()){\n            if(! 
requiremnt->wasParsed()){\n                throw ExcOptArgParse(qtr(\"'%1' is required by '%2' but was not parsed.\")\n                                     .arg(requiremnt->name(), argWithReq->name()));\n            }\n        }\n    }\n\n\n\n}\n\nQOptArg::RawValues_t &QOptArgParse::rest()\n{\n    return m_rest;\n}\n\n\nvoid QOptArgParse::setHelpIntroduction(const QString &txt)\n{\n    m_helpIntroduction = txt;\n}\n\nvoid QOptArgParse::printHelp()\n{\n    QFormattedStream s(stdout);\n\n    struct winsize termWinSize;\n    ioctl(STDOUT_FILENO, TIOCGWINSZ, &termWinSize);\n    if(termWinSize.ws_col > 10 &&  termWinSize.ws_col < 80 ){\n        s.setMaxLineWidth(termWinSize.ws_col);\n    } else {\n        s.setMaxLineWidth(80);\n    }\n\n    s << m_helpIntroduction\n      << \"\\n-h, --help :\"\n      << qtr(\"Print this help and exit\") << \"\\n\";\n\n    const QString indent = \"      \";\n    for(const auto &nameArgPair : m_args){\n        if(nameArgPair.second->internalOnly()){\n            continue;\n        }\n        QString shortNameStr = nameArgPair.second->shortName();\n        if(! shortNameStr.isEmpty()){\n            shortNameStr += \", \";\n        }\n        QString value;\n        if(nameArgPair.second->hasValue()){\n            if(nameArgPair.second->allowedOptions().empty()){\n                // first two characters are --\n                value =nameArgPair.second->name()[2];\n            } else {\n                for(const QString& str : nameArgPair.second->allowedOptions()){\n                    value += str + nameArgPair.second->allowedOptionsDelimeter();\n                }\n                value.resize(value.size() - nameArgPair.second->allowedOptionsDelimeter().size());\n            }\n        }\n        s << shortNameStr << nameArgPair.second->name();\n        s.setLineStart(indent);\n        s << value << \": \" << nameArgPair.second->description() << \"\\n\";\n        s.setLineStart(\"\");\n\n    }\n}\n"
  },
  {
    "path": "src/common/qoptargparse/qoptargparse.h",
    "content": "#pragma once\n\n#include <QString>\n#include <QHash>\n\n#include \"qoptarg.h\"\n#include \"ordered_map.h\"\n\n/// Currently no support for having the same argument multiple times\nclass QOptArgParse\n{\npublic:\n    QOptArgParse();\n    void addArg(QOptArg* arg );\n\n    void parse(int argc, char *argv[]);\n\n    QOptArg::RawValues_t& rest();\n\n    void setHelpIntroduction(const QString& txt);\n\nprivate:\n    tsl::ordered_map<QString, QOptArg*> m_args;\n    tsl::ordered_map<QString, QOptArg*> m_argsShort;\n    QOptArg::RawValues_t m_rest;\n    QString m_helpIntroduction;\n\n    void printHelp();\n};\n\n"
  },
  {
    "path": "src/common/qoptargparse/qoptargtrigger.cpp",
    "content": "#include \"qoptargtrigger.h\"\n\n#include <utility>\n\nQOptArgTrigger::QOptArgTrigger() = default;\n\nQOptArgTrigger::QOptArgTrigger(QOptArgTrigger::TriggerEntries trigger) :\n    m_trigger(std::move(trigger))\n{\n\n}\n\nconst QOptArgTrigger::TriggerEntries &QOptArgTrigger::trigger() const\n{\n    return m_trigger;\n}\n\nbool QOptArgTrigger::isEmpty() const\n{\n    return m_trigger.isEmpty();\n}\n\nvoid QOptArgTrigger::setTrigger(const TriggerEntries &trigger)\n{\n    m_trigger = trigger;\n}\n\n\n"
  },
  {
    "path": "src/common/qoptargparse/qoptargtrigger.h",
    "content": "#pragma once\n\n#include <unordered_set>\n#include <QString>\n#include <QHash>\n\n#include \"util.h\"\n\n/// Allow the consumption of multiple commandline arguments, in case\n/// a trigger word is given. Example:\n/// If the trigger word <-between> is given, two values shall be consumed,\n/// if <-greater> is given, one value shall be consumed.\nclass QOptArgTrigger\n{\npublic:\n    // store for each trigger, how many values shall be consumed\n    typedef QHash<QString, int> TriggerEntries;\n\n    QOptArgTrigger();\n    QOptArgTrigger(TriggerEntries  pTrigger);\n\n\n    const TriggerEntries& trigger() const;\n    void setTrigger(const TriggerEntries &trigger);\n\n    bool isEmpty() const;\n\n\nprivate:\n    TriggerEntries m_trigger;\n};\n\n"
  },
  {
    "path": "src/common/qoptargparse/qoptsqlarg.cpp",
    "content": "\n#include <QDebug>\n\n#include \"qoptsqlarg.h\"\n#include \"staticinitializer.h\"\n\n\nusing TriggerDefinitions = QOptArgTrigger::TriggerEntries;\n\nnamespace  {\n\n\nconst QOptArgTrigger& allArgTrigger(){\n    static QOptArgTrigger allArgTrigger;\n    static StaticInitializer loader( [](){\n        TriggerDefinitions triggerDefs;\n        for(const E_CompareOperator& op : QOptSqlArg::cmpOpsAll()){\n            int countOfConsumeVals;\n            switch (op) {\n            case E_CompareOperator::BETWEEN: countOfConsumeVals=2 ;break;\n            default: countOfConsumeVals=1 ;break;\n            }\n            triggerDefs.insert(CompareOperator(op).asTerminal(), countOfConsumeVals );\n        }\n        allArgTrigger.setTrigger(triggerDefs);\n    });\n    return  allArgTrigger;\n}\n\n} // namespace\n\n\n\n\nconst QOptSqlArg::CompareOperators &QOptSqlArg::cmpOpsAll()\n{\n    static const QOptSqlArg::CompareOperators ops = {\n        E_CompareOperator::GT,\n        E_CompareOperator::GE,\n        E_CompareOperator::LT,\n        E_CompareOperator::LE,\n        E_CompareOperator::EQ,\n        E_CompareOperator::NE,\n        E_CompareOperator::LIKE,\n        E_CompareOperator::BETWEEN\n    };\n    return ops;\n}\n\nconst QOptSqlArg::CompareOperators &QOptSqlArg::cmpOpsAllButLike()\n{\n    static const QOptSqlArg::CompareOperators ops = {\n        E_CompareOperator::GT,\n        E_CompareOperator::GE,\n        E_CompareOperator::LT,\n        E_CompareOperator::LE,\n        E_CompareOperator::EQ,\n        E_CompareOperator::NE,\n        E_CompareOperator::BETWEEN\n    };\n    return ops;\n}\n\nconst QOptSqlArg::CompareOperators &QOptSqlArg::cmpOpsText()\n{\n    static const QOptSqlArg::CompareOperators ops = {\n        E_CompareOperator::EQ,\n        E_CompareOperator::NE,\n        E_CompareOperator::LIKE\n    };\n    return ops;\n}\n\nconst QOptSqlArg::CompareOperators &QOptSqlArg::cmpOpsEqNe()\n{\n    static const QOptSqlArg::CompareOperators ops = 
{\n        E_CompareOperator::EQ,\n        E_CompareOperator::NE\n    };\n    return ops;\n}\n\n\nQOptSqlArg::QOptSqlArg(const QString &shortName,\n                       const QString &name,\n                       const QString &description,\n                       const CompareOperators &supportedOperators,\n                       const E_CompareOperator &defaultOperator) :\n    QOptArg(shortName, name, description, allArgTrigger(),\n             CompareOperator(defaultOperator).asTerminal()),\n    m_parsedOperator(defaultOperator),\n    m_supportedOperators(supportedOperators)\n{\n    if(supportedOperators.empty()){\n        throw QExcIllegalArgument(\"supportedOperators is empty\");\n    }\n    if(! supportedOperators.contains(defaultOperator)){\n        throw QExcIllegalArgument(\"supportedOperators does not contain defaultOperator\");\n    }\n}\n\nvoid QOptSqlArg::setParsedTrigger(const QString &parsedTrigger)\n{\n    QOptArg::setParsedTrigger(parsedTrigger);\n    CompareOperator op = CompareOperator();\n    if(! op.fromTerminal(parsedTrigger)){\n        throw ExcOptArgParse(qtr(\"Failed to convert %1 to a sql comparison operator\")\n                             .arg(parsedTrigger));\n    }\n    if(! m_supportedOperators.contains(op.asEnum())){\n        throw ExcOptArgParse(qtr(\"%1 is not a supported sql comparison operator for %2\")\n                             .arg(parsedTrigger, name()));\n    }\n    m_parsedOperator = op.asEnum();\n}\n\n/// All sql parameters are processed internally as lower strings.\nQString QOptSqlArg::preprocessTrigger(const char *str) const\n{\n    return QString(str).toLower();\n}\n\nQString QOptSqlArg::description() const\n{\n    QStringList operators;\n    for(const auto& op : m_supportedOperators){\n        operators.push_back(CompareOperator(op).asTerminal());\n    }\n\n    return m_description +\n            qtr(\" Supported operators: %1. 
Default operator: %2\")\n                .arg(operators.join(\", \"), m_defaultTriggerStr);\n}\n\nE_CompareOperator QOptSqlArg::parsedOperator() const\n{\n    return m_parsedOperator;\n}\n"
  },
  {
    "path": "src/common/qoptargparse/qoptsqlarg.h",
    "content": "#pragma once\n\n#include <QVector>\n\n#include \"qoptarg.h\"\n#include \"compareoperator.h\"\n\n\nclass QOptSqlArg : public QOptArg\n{\npublic:\n    typedef QVector<E_CompareOperator> CompareOperators;\n\n    static const CompareOperators& cmpOpsAll();\n    static const CompareOperators& cmpOpsAllButLike();\n    static const CompareOperators& cmpOpsText();\n    static const CompareOperators& cmpOpsEqNe();\n\n    QOptSqlArg(const QString& shortName, const QString & name,\n               const QString& description,\n               const CompareOperators& supportedOperators,\n               const E_CompareOperator& defaultOperator=E_CompareOperator::EQ);\n\n    void setParsedTrigger(const QString &parsedTrigger) override;\n    QString preprocessTrigger(const char* str) const override;\n\n    QString description() const override;\n\n    E_CompareOperator parsedOperator() const;\n\nprivate:\n    E_CompareOperator m_parsedOperator;\n    CompareOperators m_supportedOperators;\n};\n\n"
  },
  {
    "path": "src/common/qoptargparse/qoptvarlenarg.cpp",
    "content": "#include \"qoptvarlenarg.h\"\n\n\n\n\nQOptVarLenArg::QOptVarLenArg(const QString &shortName,\n                             const QString &name,\n                             const QString &description) :\n    QOptArg(shortName, name, description, true)\n{}\n"
  },
  {
    "path": "src/common/qoptargparse/qoptvarlenarg.h",
    "content": "#pragma once\n\n#include \"qoptarg.h\"\n\nclass QOptVarLenArg : public QOptArg\n{\npublic:\n    QOptVarLenArg(const QString& shortName, const QString & name,\n                  const QString& description);\n};\n\n"
  },
  {
    "path": "src/common/qresource_helper.cpp",
    "content": "#include \"qresource_helper.h\"\n#include \"compat.h\"\n\n///\n/// \\brief qresource_helper::data_safe uncompress data as neeeded\n/// \\param r\n/// \\return\n///\nQByteArray qresource_helper::data_safe(QResource &r)\n{\n    QByteArray data = Qt::resourceIsCompressed(r) ? qUncompress(r.data(), int(r.size())) :\n         QByteArray(reinterpret_cast<const char*>(r.data()));\n    return data;\n}\n"
  },
  {
    "path": "src/common/qresource_helper.h",
    "content": "#pragma once\n\n#include <QResource>\n\nnamespace qresource_helper  {\n\nQByteArray data_safe(QResource& r);\n\n}\n\n"
  },
  {
    "path": "src/common/qsimplecfg/CMakeLists.txt",
    "content": "\n\nadd_library(lib_qsimplecfg\n  cfg.cpp\n  section.cpp\n  exccfg.cpp\n )\n\ntarget_link_libraries(lib_qsimplecfg PUBLIC\n    Qt5::Core\n    lib_util\n    oscpp_lib\n    lib_orderedmap\n)\n\n\n"
  },
  {
    "path": "src/common/qsimplecfg/cfg.cpp",
    "content": "#include <cassert>\n\n#include <QFile>\n#include <QFileInfo>\n#include <QTextStream>\n#include <QDir>\n#include <QDebug>\n#include <QLockFile>\n\n#include \"cfg.h\"\n#include \"exccfg.h\"\n#include \"util.h\"\n#include \"qformattedstream.h\"\n#include \"cflock.h\"\n#include \"excos.h\"\n#include \"os.h\"\n#include \"interrupt_handler.h\"\n\n\nnamespace  {\n\n\nvoid setStreamCommentMode(QFormattedStream& s){\n    s.setMaxLineWidth(80);\n    s.setLineStart(\"# \");\n}\n\nvoid unsetStreamCommentMode(QFormattedStream& s){\n    s.setMaxLineWidth(std::numeric_limits<int>::max());\n    s.setLineStart(\"\");\n}\n\nvoid writeMultiLineKey(QFormattedStream& stream, const QString& keyname,\n                       const QString& value){\n    assert(stream.streamChunkSep() == '\\n');\n\n    auto oldMaxLineWidth = stream.maxLineWidth();\n    stream.setMaxLineWidth(std::numeric_limits<int>::max());\n    const QString TRIPLE_QUOTE = \"'''\";\n    stream << keyname + \" = \" + TRIPLE_QUOTE;\n    auto oldLineStart = stream.lineStart();\n    // repsecting oldLineStart makes this function compatible\n    // with comment and normal mode.\n    stream.setLineStart(oldLineStart + \"   \");\n    stream << value;\n    stream.setLineStart(oldLineStart);\n    stream << TRIPLE_QUOTE;\n    stream.setMaxLineWidth(oldMaxLineWidth);\n}\n} // namespace\n\n\n/// Parse the config file at filepath. Create it, if necessary.\n/// Note that the content of multi-line strings between triple quotes\n/// is parsed \"as is\", except for an optional final \\n, if the closing triple\n/// quotes are in the next line. 
Example:\n/// '''\n/// text\n/// '''\n/// -> no \\n after <text> (but before it there is one).\n/// So it does not matter, whether the closing triple quotes are in the same\n/// or the next line.\n/// @throws ExcCfg\nqsimplecfg::Cfg::Cfg() :\n    m_allowEraseSections(true)\n{}\n\nvoid qsimplecfg::Cfg::parse(const QString &filepath)\n{\n    createDirsToFilename(filepath);\n    QFile file(filepath);\n\n    if(! file.open(QIODevice::OpenModeFlag::ReadOnly | QIODevice::OpenModeFlag::Text)){\n        throw ExcCfg(qtr(\"Failed to open %1 - %2\").\n                     arg(filepath, file.errorString()));\n    }\n    parse(file);\n}\n\n/// @overload\n/// @param file: parse the already for reading opened file (whose offset should typically be zero).\nvoid qsimplecfg::Cfg::parse(QFile &file)\n{\n    QTextStream in(&file);\n    try{\n        parse(&in);\n    } catch(ExcCfg & ex){\n        ex.setDescrip(ex.descrip() +\n                       qtr(\". Please correct the file at %1\").arg(QFileInfo(file).absoluteFilePath()));\n        throw;\n    }\n}\n\nvoid qsimplecfg::Cfg::store(const QString &filepath)\n{\n    createDirsToFilename(filepath);\n    QFile file(filepath);\n    if(! file.open(QIODevice::OpenModeFlag::WriteOnly | QIODevice::OpenModeFlag::Text)){\n        throw ExcCfg(qtr(\"Failed to open %1 - %2\").\n                     arg(filepath, file.errorString()));\n    }\n    store(file);\n}\n\n/// Save config at given filepath. **Not** safe against races.\n/// @throws ExcCfg\nvoid qsimplecfg::Cfg::store(QFile &file)\n{\n    QFormattedStream stream(&file);\n    stream.setStreamChunkSep('\\n');\n    unsetStreamCommentMode(stream);\n\n    if(! 
m_initialComments.isEmpty()){\n        setStreamCommentMode(stream);\n        stream << m_initialComments;\n        unsetStreamCommentMode(stream);\n    }\n    stream << \"\\n\\n\";\n\n    for(const auto& nameSect : m_nameSectionHash){\n        stream << '[' + nameSect.second.sect->sectionName() + \"]\";\n        writeSectionCommentsToStream(nameSect.second.sect, stream);\n        writeSectionToStream(nameSect.second.sect, stream);\n        stream << \"\\n\\n\";\n    }\n}\n\n\n/// Get a parsed section or create a new one with given name.\n/// The order in which operator[] is called,\n/// determines the order in which it will be stored to disk on Cfg::store().\n/// Parsed sections ( see parse() ) which were not requested via this function,\n/// will *not* be store()'ed. The idea is that the config scheme is autogenerated\n/// by requesting the sections and keys.\n/// Note that calling this function a second time with the same section name,\n/// after another section was created, does *not* change the order.\n/// @return the Section_Ptr is never null.\nqsimplecfg::Cfg::Section_Ptr qsimplecfg::Cfg::operator[](const QString &sectName)\n{\n    auto parsedIt = m_parsedNameSectionHash.find(sectName);\n    if(parsedIt != m_parsedNameSectionHash.end()){\n        SectWithMeta sectMeta;\n        sectMeta.sect = parsedIt->second;\n        m_nameSectionHash.insert({sectName, sectMeta});\n        m_parsedNameSectionHash.erase(parsedIt);\n        return sectMeta.sect;\n    }\n\n    // section was not parsed or requested a second time: get or create:\n    auto & sectMeta = m_nameSectionHash[sectName];\n    if(sectMeta.sect == nullptr){\n        sectMeta.sect = make_shared_section(sectName);\n    }\n    return sectMeta.sect;\n}\n\n\nvoid qsimplecfg::Cfg::handleParseKeyValue(QStringRef &line, size_t *pLineNumber,\n                                     QTextStream *stream,const Section_Ptr& section)\n{\n    int equalIdx = line.indexOf('=');\n    if(equalIdx == -1){\n        throw 
ExcCfg(qtr(\"Line %1 - %2: Unexpected content (missing =)\").\n                     arg(*pLineNumber).arg(line.toString()));\n    }\n\n    QStringRef key = line.left(equalIdx).trimmed();\n    QStringRef value = line.mid(equalIdx + 1).trimmed();\n    if(! value.startsWith(\"'''\")){\n        // simple case: not a multi-line string\n        section->insert(key.toString(), value.toString());\n        return;\n    }\n\n    // ignore leading '''\n    value = value.mid(3);\n    // still possible that string ends in same line:\n    int tripleIdx = value.indexOf(\"'''\");\n    if(tripleIdx != -1){\n        if(tripleIdx != value.length()-3){\n            throw ExcCfg(qtr(\"Line %1 - %2: content after closing triple quotes '''\").\n                         arg(*pLineNumber).arg(line.toString()));\n        }\n        section->insert(key.toString(), value.left(value.size() - 3).toString());\n        return;\n    }\n\n    m_keyValBuf = value.toString();\n\n    // mutli line string: keep going through file until the\n    // next '''\n    size_t startingLine = *pLineNumber;\n    while (true) {\n        if(! 
readLineInto(*stream, &m_keyValReadBuf)){\n            break;\n        }\n        (*pLineNumber)++;\n        QStringRef currentLine(&m_keyValReadBuf);\n        currentLine = currentLine.trimmed();\n\n        tripleIdx = currentLine.indexOf(\"'''\");\n        if(tripleIdx == -1){\n            // keep \\n's for later split\n            m_keyValBuf += '\\n' + currentLine.toString() ;\n            continue;\n        }\n        if(tripleIdx != currentLine.length()-3){\n            throw ExcCfg(qtr(\"Line %1 - %2: content after closing triple quotes '''\").\n                         arg(*pLineNumber).arg(currentLine.toString()));\n        }\n        if(tripleIdx != 0){            \n            m_keyValBuf += '\\n' + currentLine.left(currentLine.size() - 3).toString();\n        }\n        section->insert(key.toString(), m_keyValBuf);\n        return;\n    }\n    throw ExcCfg(qtr(\"Line %1 - %2: missing closing triple quotes '''\").\n                 arg(startingLine).arg(line.toString()));\n\n\n}\n\nvoid\nqsimplecfg::Cfg::writeKeyValue(const QString &key,\n                               const QString &val,\n                               const QString &sep,\n                               QFormattedStream &stream)\n{\n    if(sep.contains('\\n') ||\n            val.contains('\\n')){\n        writeMultiLineKey(stream, key, val);\n    } else {\n        stream << key + \" = \" + val;\n    }\n}\n\nvoid\nqsimplecfg::Cfg::writeSectionToStream(const qsimplecfg::Cfg::Section_Ptr &sect,\n                                      QFormattedStream &stream)\n{\n    for(const auto & keyValMeta : sect->keyValHash()){\n        QString valStr;\n        if(keyValMeta.second.insertDefault){\n            valStr =   QVariantListToString(\n                        keyValMeta.second.defaultValues,\n                        keyValMeta.second.separator);\n        } else {\n            if(keyValMeta.second.rawStr.isNull()){\n                // No value was parsed and default shall\n                // 
not be inserted -> do not write this\n                // key to file.\n                continue;\n            }\n            valStr = keyValMeta.second.rawStr.trimmed();\n        }\n        writeKeyValue(keyValMeta.first, valStr, keyValMeta.second.separator, stream);\n    }\n}\n\nvoid\nqsimplecfg::Cfg::writeSectionCommentsToStream(const qsimplecfg::Cfg::Section_Ptr &sect,\n                                                   QFormattedStream &stream)\n{\n    setStreamCommentMode(stream);\n    if(! sect->comments().isEmpty()){\n        stream << sect->comments() + '\\n';\n    }\n    for(const auto& keyValMeta : sect->keyValHash()){\n        const auto & key = keyValMeta.first;\n        const auto & valMeta = keyValMeta.second;\n        if(! valMeta.insertDefaultToComments){\n            continue;\n        }\n        const QString defaultValStr =  QVariantListToString(valMeta.defaultValues, valMeta.separator);\n        writeKeyValue(key, defaultValStr, valMeta.separator, stream);\n    }\n    unsetStreamCommentMode(stream);\n}\n\n\n/// @throws ExcCfg\nvoid\nqsimplecfg::Cfg::createDirsToFilename(const QString &filename)\n{\n    assert(! filename.isEmpty());\n    QFileInfo fileInfo(filename);\n    if(! 
QDir().mkpath(fileInfo.absolutePath())){\n        throw ExcCfg(qtr(\"Failed to create directories for path %1\")\n                                 .arg(fileInfo.absolutePath()) );\n    }\n}\n\n\nQString\nqsimplecfg::Cfg::QVariantListToString(const QVariantList &l, const QString &sep)\n{\n    QString str;\n    for(const auto& v : l){\n        str += qVariantTo_throw<QString>(v) + sep;\n    }\n    return str;\n}\n\n\n\nvoid qsimplecfg::Cfg::setInitialComments(const QString &comments)\n{\n    m_initialComments = comments;\n}\n\n\n\n/// Return all sections and their keys which were not read after having been\n/// inserted.\nqsimplecfg::NotReadSectionKeys\nqsimplecfg::Cfg::generateNonReadSectionKeyPairs()\n{\n    NotReadSectionKeys allNotRead;\n    for(auto it = m_nameSectionHash.begin(); it != m_nameSectionHash.end(); ++it){\n       auto notReadKeys = it.value().sect->notReadKeys();\n       if(! notReadKeys.empty()){\n           allNotRead.push_back({it.key(), notReadKeys});\n       }\n    }\n    return allNotRead;\n}\n\n/// Rename a parsed section. Warning: it is *not* allowed, to call this function\n/// after having accessed a section via operator[], because that would destroy\n/// the order of the sections.\n/// @return true, if the old section existed.\nbool qsimplecfg::Cfg::renameParsedSection(const QString &oldName, const QString &newName)\n{\n    assert(m_nameSectionHash.empty());\n    auto oldIt = m_parsedNameSectionHash.find(oldName);\n    if(oldIt == m_parsedNameSectionHash.end()){\n        return false;\n    }\n    Section_Ptr sect = oldIt->second;\n    sect->setSectionName(newName);\n    m_parsedNameSectionHash.erase(oldIt);\n\n    m_parsedNameSectionHash[newName] = sect;\n    return true;\n}\n\n/// @return those sections which were parsed but not accessed via operator[]\nconst qsimplecfg::Cfg::ParsedNameSectionHash&\nqsimplecfg::Cfg::getParsedButNotReadNameSectionHash() const\n{\n    return m_parsedNameSectionHash;\n}\n\n\n/// Get a parsed section. 
Warning: it is *not* allowed, to call this function\n/// after having accessed a section via operator[], because that would destroy\n/// the order of the sections. So call this function after Cfg::parse but before\n/// accessing any section via operator[].\n/// @return the parsed section or null\nqsimplecfg::Cfg::Section_Ptr qsimplecfg::Cfg::getParsedSectionIfExist(const QString &sectName)\n{\n    assert(m_nameSectionHash.empty());\n    auto it = m_parsedNameSectionHash.find(sectName);\n    if(it == m_parsedNameSectionHash.end()){\n        return nullptr;\n    }\n    return it->second;\n}\n\n/// @overload\nvoid qsimplecfg::Cfg::parse(QTextStream *in)\n{\n    m_parsedNameSectionHash.clear();\n    m_nameSectionHash.clear();\n    m_initialComments.clear();\n\n    bool withinSection=false;\n    Section_Ptr currentSect;\n    QString currentSectName;\n    size_t currentLine = 0;\n\n    QString lineBuf;\n    lineBuf.reserve(8192);\n    while (true) {\n        if(! readLineInto(*in, &lineBuf)){\n            break;\n        }\n\n        QStringRef line(&lineBuf);\n        line = line.trimmed();\n        currentLine++;\n\n        if(line.startsWith('#')){\n            // No point in reading comments.\n        } else if(line.isEmpty()){\n\n        } else if(line.startsWith('[')){\n            if(! 
line.endsWith(']')){\n                throw ExcCfg(qtr(\"Line %1 - %2: section start [ without closing end ] detected\").\n                             arg(currentLine).arg(line.toString()));\n            }\n            withinSection = true;\n            currentSectName = line.mid(1, line.size() - 2).toString();\n            if(currentSectName.isEmpty()){\n                throw ExcCfg(qtr(\"Line %1 - %2: empty section detected\").\n                             arg(currentLine).arg(line.toString()));\n            }\n            auto currentSectIt = m_parsedNameSectionHash.find(currentSectName);\n            if(currentSectIt != m_parsedNameSectionHash.end()){\n                throw ExcCfg(qtr(\"Line %1 - %2: section name already defined (in upper line)\").\n                             arg(currentLine).arg(line.toString()));\n            }\n            currentSect = make_shared_section(currentSectName);\n            m_parsedNameSectionHash.insert({currentSectName, currentSect});\n        } else {\n            if(! withinSection){\n                throw ExcCfg(qtr(\"Line %1 - %2: Content before first section\").\n                             arg(currentLine).arg(line.toString()));\n            }\n            handleParseKeyValue(line, &currentLine, in, currentSect );\n        }\n    }\n}\n"
  },
  {
    "path": "src/common/qsimplecfg/cfg.h",
    "content": "\n#pragma once\n\n#include <memory>\n#include <unordered_map>\n#include <QString>\n#include <QHash>\n#include <QStringList>\n\n#include \"section.h\"\n\nclass QTextStream;\nclass CfgTest; // unit test\nclass QFormattedStream;\n\nnamespace qsimplecfg {\n\n/// First Element of each pair is section-name, second a set of not read keys\ntypedef QVector<QPair<QString, std::unordered_set<QString> > > NotReadSectionKeys ;\n\n\n/// A simple parser for ini-like config files.\n/// The scheme of the config file, that is: the order of sections\n/// and keys and their default values is autogenerated by subsequent\n/// calls to Cfg::operator[] and Section::getValue. *Only* those sections\n/// and keys are written (back) to the config-file, which were accessed\n/// by these methods. The order written to file is exactly the access order.\n/// Scheme updates work by renaming sections (Cfg::renameParsedSection) and\n/// keys (Section::renameParsedKey) after Cfg::parse but before accessing\n/// a value via the above described methods. 
To rename a key, obtain\n/// the section via Cfg::getParsedSectionIfExist and *not* Cfg::operator[].\n///\n/// No subsections are supported but:\n/// - initial file comment\n/// - comments after a section header:\n///   [sectionname]\n///   # comment1\n///   # comment2\n/// - values over multiple lines with triple quotes ''':\n///   key = '''foo1\n///         foo2'''\n/// - comments are always re-written by the application on store(),\n///   but ignored when parsing the file.\nclass Cfg\n{\npublic:\n    typedef std::shared_ptr<Section> Section_Ptr;\n    typedef std::unordered_map<QString, Section_Ptr> ParsedNameSectionHash;\n\n    Cfg();\n\n    void parse(const QString& filepath);\n    void parse(QFile& file);\n    void store(const QString& filepath);\n    void store(QFile& file);\n\n    Section_Ptr operator[](const QString &sectName);\n\n    void setInitialComments(const QString &comments);\n\n    NotReadSectionKeys generateNonReadSectionKeyPairs();\n\n    bool renameParsedSection(const QString& oldName, const QString& newName);\n\n    const ParsedNameSectionHash& getParsedButNotReadNameSectionHash() const;\n\n    Section_Ptr getParsedSectionIfExist(const QString& sectName);\n\nprivate:\n    // maybe_todo: store plain section instead, if sure, that no metadata\n    // about it needs to be stored...\n    struct SectWithMeta {   \n        Section_Ptr sect;\n    };\n\n    typedef tsl::ordered_map<QString, SectWithMeta> SectionHash;\n\n    ParsedNameSectionHash m_parsedNameSectionHash; // parsed from file\n    SectionHash m_nameSectionHash; // accessed by user via operator[]\n    QString m_initialComments;\n    QString m_keyValReadBuf;\n    QString m_keyValBuf;\n    bool m_allowEraseSections;\n\n    void parse(QTextStream *in);\n    void handleParseKeyValue(QStringRef &line,\n                        size_t* pLineNumber,\n                        QTextStream* stream,\n                        const Section_Ptr &section);\n    void writeKeyValue(const QString& key, const 
QString& val,\n                       const QString& sep, QFormattedStream& stream);\n    void writeSectionToStream(const Section_Ptr& sect, QFormattedStream& stream);\n    void writeSectionCommentsToStream(const Section_Ptr& sect, QFormattedStream& stream);\n\n\n    static void createDirsToFilename(const QString& filename);\n\n    static std::shared_ptr<Section> make_shared_section(const QString& sectName)  {\n        // since section is private, but make_shared requires it to be public, the dummy inheritance is\n        // one solution\n        struct allow_mk_shared : public Section {\n            allow_mk_shared(const QString& sectName) : Section(sectName) {}\n        };\n        return std::make_shared<allow_mk_shared>(sectName);\n    }\n    static QString QVariantListToString(const QVariantList& l, const QString& sep);\n\n\n    // unit test:\n    friend class ::CfgTest;\n};\n\n} // namespace qsimplecfg\n"
  },
  {
    "path": "src/common/qsimplecfg/exccfg.cpp",
    "content": "\n#include \"exccfg.h\"\n\n\n\nqsimplecfg::ExcCfg::ExcCfg(const QString &preamble) :\n    QExcCommon (preamble, false)\n{}\n\n"
  },
  {
    "path": "src/common/qsimplecfg/exccfg.h",
    "content": "#pragma once\n\n#include <QString>\n\n#include \"exccommon.h\"\n\nnamespace qsimplecfg {\n\nclass ExcCfg : public QExcCommon\n{\npublic:\n     ExcCfg(const QString & preamble);\n\n};\n\n\n} // namespace qsimplecfg\n"
  },
  {
    "path": "src/common/qsimplecfg/section.cpp",
    "content": "\n\n\n#include \"section.h\"\n#include \"conversions.h\"\n\nqsimplecfg::Section::Section(const QString &sectionName) :\n    m_sectionName(sectionName)\n{}\n\n\nvoid qsimplecfg::Section::insert(const QString &key, const QString &value)\n{\n    m_parsedKeyValHash.insert({key, value});\n    m_NotReadKeys.insert(key);\n}\n\n\n/// @see getValue().\nqint64 qsimplecfg::Section::getFileSize(const QString &key, const qint64 &defaultValue,\n                                        bool insertDefaultIfNotExist)\n{\n    try {\n        Conversions userStrConv;\n\n        return userStrConv.bytesFromHuman( this->getValue<QString>(\n                                        key,\n                                        userStrConv.bytesToHuman(defaultValue),\n                                        insertDefaultIfNotExist));\n    } catch (const ExcConversion& ex) {\n        throw qsimplecfg::ExcCfg(ex.descrip() + \" (key \" + key + ')' );\n    }\n}\n\n\nconst qsimplecfg::Section::KeyMetaValHash &qsimplecfg::Section::keyValHash()\n{\n    return m_keyValHash;\n}\n\n\nvoid qsimplecfg::Section::setComments(const QString &comments)\n{\n    m_comments = comments;\n    if(! 
comments.isEmpty() && comments[comments.size() - 1] != QChar::LineFeed){\n        m_comments.push_back(QChar::LineFeed);\n    }\n\n}\n\nconst QString& qsimplecfg::Section::comments() const\n{\n    return m_comments;\n}\n\n/// Affects subsequent calls to getValue(s): if true, default values will be written\n/// to comments on Cfg::store, else not.\nvoid qsimplecfg::Section::setInsertDefaultToComments(bool insertDefaultToComments)\n{\n    m_insertDefaultToComments = insertDefaultToComments;\n}\n\nvoid qsimplecfg::Section::removeFromNotReadKeysIfExist(const QString &key)\n{\n    auto it = m_NotReadKeys.find(key);\n    if(it != m_NotReadKeys.end()){\n        m_NotReadKeys.erase(it);\n    }\n}\n\nqsimplecfg::Section::ValueWithMeta&\nqsimplecfg::Section::generateValueWithMeta(const QString &key, const QString& separator,\n                                           const QVariantList& defaultValues,\n                                           bool insertDefaultIfNotExist)\n{\n    removeFromNotReadKeysIfExist(key);\n    ValueWithMeta & valWithMeta = m_keyValHash[key];\n    valWithMeta.insertDefaultToComments = m_insertDefaultToComments;\n    valWithMeta.separator = separator;\n    valWithMeta.defaultValues = defaultValues;\n\n    auto parsedValIt = m_parsedKeyValHash.find(key);\n    if(parsedValIt == m_parsedKeyValHash.end()){\n        valWithMeta.rawStr = QString();\n        valWithMeta.insertDefault = insertDefaultIfNotExist;\n    } else {\n        valWithMeta.rawStr = parsedValIt->second;\n    }\n    return valWithMeta;\n}\n\nvoid qsimplecfg::Section::setSectionName(const QString &sectionName)\n{\n    m_sectionName = sectionName;\n}\n\nconst QString& qsimplecfg::Section::sectionName() const\n{\n    return m_sectionName;\n}\n\n\n/// Rename a parsed key. Warning: it is *not* allowed, to call this function\n/// after having accessed a key via getValue(), because that would destroy\n/// the order of the keys. 
So call this function after Cfg::parse but before\n/// accessing any value.\nbool qsimplecfg::Section::renameParsedKey(const QString &oldName, const QString &newName)\n{\n    assert(m_keyValHash.empty());\n    auto oldIt = m_parsedKeyValHash.find(oldName);\n    if(oldIt == m_parsedKeyValHash.end()){\n        return false;\n    }\n    const QString value = oldIt->second;\n    m_parsedKeyValHash[newName] = value;\n    m_parsedKeyValHash.erase(oldIt);\n    return true;\n}\n\n\n/// return those keys which were not read via getValue[s]() after insert();\nconst std::unordered_set<QString>& qsimplecfg::Section::notReadKeys() const\n{\n    return m_NotReadKeys;\n}\n\n\n\n"
  },
  {
    "path": "src/common/qsimplecfg/section.h",
    "content": "\n#pragma once\n\n#include <unordered_set>\n#include <unordered_map>\n#include <QDebug>\n#include <QString>\n#include <QHash>\n#include <QVariant>\n\n#include \"compat.h\"\n#include \"ordered_map.h\"\n#include \"exccfg.h\"\n#include \"util.h\"\n#include \"generic_container.h\"\n\n\n\nnamespace qsimplecfg {\n\n/// A config section consisting of optional initial comments\n/// and key-value pairs. Example\n/// # Some comment\n/// key1 = val1\n/// key2 = val2\n///\n/// If insertDefaultToComments is true, add a comment of the form\n/// key = defaultValue when getValue is called, as hint for the user.\n/// Note that comments are ignored when parsing the config file, they are\n/// set by the application (and written to the file on store).\nclass Section\n{\npublic:\n\n    qint64 getFileSize(const QString & key, const qint64& defaultValue={},\n                       bool insertDefaultIfNotExist=false );\n\n    template <typename T>\n    T getValue(const QString & key, const T& defaultValue=T(),\n               bool insertDefaultIfNotExist=false);\n\n    template<class ContainerT>\n    ContainerT getValues(const QString & key, const ContainerT & defaultValue=ContainerT(),\n                         bool insertDefaultIfNotExist=false,\n                         const QString & separator=\",\",\n                         Qt::SplitBehavior splitbehaviour=Qt::SkipEmptyParts );\n\n\n    void setComments(const QString &comments);\n\n    const QString & comments() const;\n\n    void setInsertDefaultToComments(bool insertDefaultToComments);\n\n    const std::unordered_set<QString>& notReadKeys() const;\n\n    const QString &sectionName() const;\n\n    bool renameParsedKey(const QString& oldName, const QString& newName);\n\npublic:\n    ~Section() = default;\n    Q_DISABLE_COPY(Section)\n    DISABLE_MOVE(Section)\n\nprivate:\n    struct ValueWithMeta {\n        QString rawStr; // isNull() == true, if not parsed.\n        QString separator;\n        QVariantList 
defaultValues;\n        bool insertDefault {false};\n        bool insertDefaultToComments {true};\n    };\n\nprivate:\n    friend class Cfg;\n\n    typedef tsl::ordered_map<QString, ValueWithMeta> KeyMetaValHash;\n    // methods to be called from class Cfg:\n    Section(const QString& sectionName);\n    void setSectionName(const QString &sectionName);\n    void insert(const QString & key, const QString& value);\n    const KeyMetaValHash& keyValHash();\n    QString generateComments();\n\nprivate:\n\n    void removeFromNotReadKeysIfExist(const QString& key);\n    template <typename T>\n    T convertValueOrThrow(const QString& valueStr, const QString& keyname);\n    ValueWithMeta& generateValueWithMeta(const QString& key, const QString &separator,\n                                         const QVariantList &defaultValues,\n                                         bool insertDefaultIfNotExist);\n\n    QString m_comments;\n    std::unordered_map<QString, QString> m_parsedKeyValHash; // parsed from file\n    KeyMetaValHash m_keyValHash; // accessed by user via getValue(s)\n    bool m_insertDefaultToComments { true };\n    std::unordered_set<QString> m_NotReadKeys;\n    QString m_sectionName;\n};\n\n\n\n/// Find the 'value' corresponding to 'key' and try to convert it\n/// to the target type (throws on error). 
If the value is not\n/// found or is empty, the default one is returned, which is also inserted\n/// into the section, if param insertDefaultIfNotExist is true.\n/// The default value *must* be convertible to a string using qVariantTo<>.\n/// If possible, the default is stored as hint in the comments.\n/// @throws ExcCfg\ntemplate <typename T>\nT Section::getValue(const QString & key, const T& defaultValue,\n           bool insertDefaultIfNotExist){\n#ifndef NDEBUG\n    QString assertTmpResult;\n    assert(qVariantTo(defaultValue, &assertTmpResult) );\n#endif\n    auto & valWithMeta = generateValueWithMeta(key, QString(), { QVariant::fromValue(defaultValue) },\n                                                      insertDefaultIfNotExist);\n    if(valWithMeta.rawStr.isNull()){\n        // not parsed, return default;\n        return defaultValue;\n    }\n    return convertValueOrThrow<T>(valWithMeta.rawStr, key);\n}\n\n\n/// Similar to getValue, but support for multiple values stored within the\n/// same key. The single-value container, whose elements are of ValT is then\n/// filled. 
The value is *always* trimmed.\n/// @throws ExcCfg\ntemplate<class ContainerT>\nContainerT Section::getValues(const QString &key, const ContainerT &defaultValue,\n                              bool insertDefaultIfNotExist,\n                              const QString &separator,\n                              Qt::SplitBehavior splitbehaviour)\n{\n    QVariantList defaultVariantValues;\n    for(const auto & val : defaultValue){\n#ifndef NDEBUG\n        QString assertTmpResult;\n        assert(qVariantTo(val, &assertTmpResult) );\n#endif\n        defaultVariantValues.push_back(QVariant::fromValue(val));\n    }\n    auto & valWithMeta = generateValueWithMeta(key, separator, defaultVariantValues,\n                                                      insertDefaultIfNotExist);\n    if(valWithMeta.rawStr.isNull()){\n        // not parsed, return default;\n        return defaultValue;\n    }\n    QStringList list = valWithMeta.rawStr.split(separator, splitbehaviour);\n    ContainerT container;\n    for(const QString & el : list){\n        auto parsedVal = convertValueOrThrow<typename ContainerT::value_type>(el, key);\n        addToContainer(container, parsedVal);\n    }\n    return container;\n}\n\n\n////////////////////////////////// private //////////////////////////////////\n\ntemplate<typename T>\nT Section::convertValueOrThrow(const QString &valueStr, const QString &keyname)\n{\n    try {\n        return qVariantTo_throw<T>(valueStr, false);\n    } catch (const ExcQVariantConvert& ex) {\n        throw qsimplecfg::ExcCfg( qtr(\"%1 (key %2) in section %3\")\n                                  .arg(ex.descrip(), keyname, m_sectionName));\n    }\n}\n\n} // namespace qsimplecfg\n"
  },
  {
    "path": "src/common/safe_file_update.h",
    "content": "#pragma once\n\n#include <sys/stat.h>\n#include <unistd.h>\n#include <iostream>\n\n#include \"interrupt_handler.h\"\n#include \"logger.h\"\n#include \"os.h\"\n#include \"osutil.h\"\n#include \"qfilethrow.h\"\n#include \"util.h\"\n\n/// Safeley read and update (config) files without locking, even on NFS(!).\n/// For 'normal' filesystems (e.g. ext4) renaming files within the same filesystem\n/// is an 'atomic' operation. However, on NFS this is not the case (see e.g.\n/// https://serverfault.com/questions/817887/rename-on-nfs-atomicity ). However, link-\n/// or directory-creation is referred being atomic (https://unix.stackexchange.com/a/125946).\n/// Therefore, we use the following procedure:\n/// The basic procedure is:\n/// * Updaters 'atomically' create a lock_dir, do the rename, sync, and remove the lock\n/// * Readers are prepared for non-existing files and stale reads, whereupon they try to\n///   gain the lock themselves. Once they got the lock, they try to read the config file\n///   again, to differentiate file-in-update from file-not-exist.\nclass SafeFileUpdate\n{\npublic:\n    explicit SafeFileUpdate(const QString& filepath):\n        m_filepath(filepath),\n        m_lockfilepath(filepath.toLocal8Bit() + \"__lock\"),\n        m_file(filepath)\n    {}\n\n    ~SafeFileUpdate() {\n        // Do not throw from destructor\n        try {\n            if(m_isLocked){\n                doUnlock();\n            }\n        } catch (const std::exception& ex ) {\n            std::cerr << ex.what() << \"\\n\";\n        }\n    }\n\n    QFileThrow& file() {\n        return m_file;\n    }\n\n    template <typename F>\n    bool read(F func){\n        const QIODevice::OpenMode openflags = QIODevice::OpenModeFlag::ReadOnly |\n                QIODevice::OpenModeFlag::Text;\n        auto finallyClose = finally([this]{ if(m_file.isOpen()){ m_file.close(); } });\n        try {\n            m_file.open(openflags);\n            func();\n            return true;\n  
      } catch (const QExcIo& ex) {\n            switch (ex.errorNumber()) {\n            case ENOENT: break;\n            case ESTALE: break;\n            default:\n                logWarning << \"unhandled error in\" <<__func__ << \"file\" << m_filepath;\n                throw;\n            }\n        }\n        doLock();\n        // logDebug << \"got lock for reading\";\n        QFileInfo fileInfo(m_filepath);\n        if(! fileInfo.exists()){\n            // We got the lock, but the file still does not exist.\n            // In the case of config files this usually means, that we'll\n            // initially create it later. We keep the lock.\n            logDebug << \"file does not exist:\" << m_filepath;\n            return false;\n        }\n        // We got the lock and the file exists. Reading it should now succeed\n        try {\n            // reopen the file anyway, as the descriptor might be 'stale'.\n            if(m_file.isOpen()){\n                 m_file.close();\n             }\n             m_file.open(openflags);\n            func();\n        } catch (const std::exception&) {\n            logDebug << \"second attempt failed altough we got the lock. Oh oh...\";\n            doUnlock();\n            throw;\n        }\n        return true;\n    }\n\n    template <typename F>\n    void write(F func){\n        bool renameSuccess = false;\n        int fd = -1;\n        if(! m_isLocked){\n            doLock();\n        }\n        // logDebug << \"got lock for update\";\n\n        QByteArray tmpFilepath = pathJoinFilename(\n                    QFileInfo(m_filepath).absolutePath().toLocal8Bit(),\n                    QByteArray(\"tmp.XXXXXX\"));\n        auto finalActions = finally([this, &fd, &renameSuccess, &tmpFilepath]{\n            if(fd != -1 && ! 
renameSuccess){\n                os::remove(tmpFilepath);\n            }\n            if(m_file.isOpen()){\n                m_file.close();\n            }\n            this->doUnlock();\n        });\n\n        fd = osutil::mktmp(tmpFilepath);\n        m_file.open(fd, QIODevice::OpenModeFlag::ReadWrite, QFileDevice::AutoCloseHandle);\n        func();\n        // Also flushes the file before sync\n        m_file.close();\n        auto dstPath = m_filepath.toLocal8Bit();\n        os::rename(tmpFilepath, dstPath);\n        renameSuccess = true;\n        sync();\n    }\n\npublic:\n    Q_DISABLE_COPY(SafeFileUpdate)\n\nprivate:\n    QString m_filepath;\n    QByteArray m_lockfilepath;\n    QFileThrow m_file;\n    bool m_isLocked{false};\n    InterruptProtect m_interruptProtect;\n\n    void doLock(){\n        if(m_isLocked){\n            throw QExcProgramming(QString(__func__) + \": already locked \" + m_filepath);\n        }\n        QFileInfo fileInfo(m_filepath);\n        if(! QDir().mkpath(fileInfo.absolutePath())){\n            throw QExcIo(qtr(\"Failed to create directories for path %1\")\n                                     .arg(fileInfo.absolutePath()) );\n        }\n\n        m_interruptProtect.enable(os::catchableTermSignals());\n\n        // try to lock the path by creating a dir. Directory creation\n        // is atomic even on NFS.\n        for(int i=0; i < 10; i++){\n            if(mkdir(m_lockfilepath.data(), 0755) == 0){\n                m_isLocked = true;\n                return;\n            }\n            if(errno != EEXIST){\n                throw QExcIo(\"failed to create lockfile \" + m_lockfilepath);\n            }\n            sleep(1);\n        }\n        throw QExcIo(\"Gave up creating lock-directory \" + m_lockfilepath +\n                     \". 
If it's not a load-problem, please remove the stale directory.\");\n    }\n\n    void doUnlock(){\n        if(!m_isLocked){\n            throw QExcProgramming(QString(__func__) + \": not locked \" + m_filepath);\n\n        }\n        if(rmdir(m_lockfilepath) != 0){\n            logCritical << __func__ << \"failed to remove lockpath:\" << strerror(errno);\n        }\n        m_interruptProtect.disable();\n        m_isLocked = false;\n    }\n\n};\n"
  },
  {
    "path": "src/common/settings.cpp",
    "content": "\n#include <cassert>\n#include <QStandardPaths>\n#include <QVariant>\n#include <QDebug>\n#include <QCoreApplication>\n#include <regex>\n\n\n#include \"settings.h\"\n\n#include \"cfg.h\"\n#include \"exccfg.h\"\n#include \"os.h\"\n#include \"util.h\"\n#include \"pathtree.h\"\n#include \"logger.h\"\n#include \"app.h\"\n#include \"translation.h\"\n#include \"cflock.h\"\n#include \"qfilethrow.h\"\n#include \"conversions.h\"\n#include \"safe_file_update.h\"\n\nusing Section_Ptr = qsimplecfg::Cfg::Section_Ptr;\nusing qsimplecfg::ExcCfg;\nusing StringSet = Settings::StringSet;\nusing std::numeric_limits;\n\nconst char* Settings::SECT_READ_NAME {\"File read-events\"};\nconst char* Settings::SECT_READ_KEY_ENABLE {\"enable\"};\nconst char* Settings::SECT_READ_KEY_INCLUDE_PATHS {\"include_paths\"};\n\n\nconst char* Settings::SECT_SCRIPTS_NAME {\"File read-events storage settings\"};\nconst char* Settings::SECT_SCRIPTS_ENABLE {\"enable\"};\nconst char* Settings::SECT_SCRIPTS_INCLUDE_PATHS {\"include_paths\"};\nconst char* Settings::SECT_SCRIPTS_INCLUDE_FILE_EXTENSIONS {\"include_file_extensions\"};\n\n\nSettings &Settings::instance()\n{\n    static Settings s;\n    return s;\n}\n\nvoid Settings::setUserCfgDir(const QString &p)\n{\n    m_userCfgDir = p;\n}\n\nvoid Settings::setUserDataDir(const QString &p)\n{\n    m_userDataDir = p;\n}\n\n\nconst QStringList &Settings::defaultIgnoreCmds()\n{\n    // commands ending with an asterisk will be\n    // later inserted into the to-ignore-commands\n    // No args may be added in that case.\n    static const QStringList vals = {\"mount*\",\n                                     QString(app::SHOURNAL) + '*',\n                                     QString(app::SHOURNAL_RUN) + '*'\n                                         };\n    return vals;\n}\n\nQString Settings::cfgAppDir()\n{\n    if(! 
m_userCfgDir.isEmpty()){\n        return m_userCfgDir;\n    }\n    // don't make path static -> mutliple test cases...\n    return\n       pathJoinFilename(QStandardPaths::writableLocation(QStandardPaths::ConfigLocation),\n                        QCoreApplication::applicationName());\n}\n\nQString Settings::cfgFilepath()\n{\n    // don't make path static -> mutliple test cases...\n    return cfgAppDir() + \"/config.ini\";\n}\n\nQString Settings::dataDir()\n{\n    if(! m_userDataDir.isEmpty()){\n        return m_userDataDir;\n    }\n    return QStandardPaths::writableLocation(QStandardPaths::DataLocation);\n}\n\n// was in use until shournal 2.1, then migrated\n// from .cache/shournal to .config/shournal\nstatic QString legacyCfgVersionFilePath(){\n\n    // don't make path static -> mutliple test cases...\n    const QString path = QStandardPaths::writableLocation(QStandardPaths::CacheLocation)\n            + \"/config-file-version\";\n    return path;\n}\n\n\n/// For lines not ending with an asterisk:\n/// The first word (whitespace!) 
is considered the command, whose full\n/// path is found, the rest is appended as arguments.\n/// If no command could be found, the returned string is empty\n/// Example\n/// \"bash -c\"  -> /bin/bash -c\n/// \"bash\" -> /bin/bash\nstatic QString ignoreCmdLineToFullCmdAndArgs(const QString& str){\n    int spaceIdx = str.indexOf(QChar::Space);\n    if(spaceIdx == -1){\n        return  QStandardPaths::findExecutable(str);\n    }\n    QString cmd = str.left(spaceIdx);\n    cmd = QStandardPaths::findExecutable(cmd);\n    if(cmd.isEmpty()) return cmd;\n    // space still there...:\n    return cmd + str.mid(spaceIdx);\n\n}\n\n\n/// Adds a given command string from defaults or config file to the respective set.\n/// @param warnIfNotFound: if false, no warnings are printed if a command is not found.\n/// It is to prevent printing warnings for default commands, which are not installed on\n/// the target system.\n/// If a command ends with an asterisk, it will be ignored, regardless\n/// of its arguments. Else the commands are only ignored if the arguments match exactly\nvoid Settings::addIgnoreCmd(QString cmd, bool warnIfNotFound, const QString & ignoreCmdsSectName){\n    const QString lineCopy = cmd;\n    // do not simplify as argument in the config parser!\n    cmd=cmd.simplified();\n    const QString ignoreCmdsErrPreamble = ignoreCmdsSectName +\n            qtr(\": invalid command in line \") + '<';\n    if(cmd.endsWith('*')){\n        cmd.remove(cmd.size()-1, 1);\n        cmd=cmd.trimmed();\n        if(cmd.contains(' ')){\n            logWarning << ignoreCmdsErrPreamble << lineCopy << \"> - \"\n                       << qtr(\"The command contains whitespaces. 
\"\n                              \"Note that arguments are not (yet) supported \"\n                              \"when wildcards are used.\");\n\n            return;\n        }\n        QString fullPath = QStandardPaths::findExecutable(cmd);\n        if(fullPath.isEmpty()){\n            if(warnIfNotFound){\n                logWarning << ignoreCmdsErrPreamble << lineCopy << \"> - not found:\"  << cmd;\n            }\n        } else {\n            m_ignoreCmdsRegardlessOfArgs.insert(fullPath.toStdString());\n        }\n    } else {\n        cmd = ignoreCmdLineToFullCmdAndArgs(cmd);\n        if(cmd.isEmpty()){\n            if(warnIfNotFound){\n                logWarning << ignoreCmdsErrPreamble << lineCopy << \"> - not found.\" ;\n            }\n        } else {\n\n            m_ignoreCmds.insert(cmd.toStdString());\n        }\n    }\n}\n\n\n\n\n/// @param hiddenPaths: if not null, store hidden paths in the passed tree, instead\n/// of the returned one.\nstd::shared_ptr<PathTree> Settings::loadPaths(Section_Ptr& section,\n          const QString& keyName,\n          bool eraseSubpaths,\n          const std::unordered_set<QString> & defaultValues,\n          PathTree* hiddenPaths){\n    auto rawPaths = section->getValues<std::unordered_set<QString> >(keyName,\n                                              defaultValues,\n                                              false, \"\\n\");\n    auto tree = std::make_shared<PathTree>();\n    for(const auto& p : rawPaths){\n        QString canonicalPath = p;\n        if(canonicalPath.startsWith(\"$CWD\")){\n            if(m_workingDir.isEmpty()){\n                logWarning << qtr(\"section %1: %2: $CWD is set but the working-\"\n                                  \"directory could not be determined. 
Maybe it does \"\n                                  \"not exist?\")\n                              .arg(section->sectionName(), keyName);\n                continue;\n            }\n            canonicalPath.replace(\"$CWD\", m_workingDir);\n\n        } else if(canonicalPath.startsWith(\"$HOME\")){\n            canonicalPath.replace(\"$HOME\", m_userHome);\n        } else if(canonicalPath.startsWith(\"~\")) {\n            canonicalPath.replace(\"~\", m_userHome);\n        }\n\n        canonicalPath = QDir(canonicalPath).canonicalPath();\n        if(canonicalPath.isEmpty()){\n            logWarning << qtr(\"section %1: %2: path does not exist: %3\")\n                          .arg(section->sectionName(), keyName, p);\n            continue;\n        }\n\n        PathTree* currentTree =(hiddenPaths != nullptr &&\n                                canonicalPath.contains(\"/.\")) ? hiddenPaths : tree.get();\n        auto canoicalPathLight = toStrLight(canonicalPath);\n        // avoid adding needless parent/subpaths\n        if(eraseSubpaths){\n            if(currentTree->isSubPath(canoicalPathLight)){\n                logDebug<< keyName << \"ignore\" << canonicalPath\n                         << \"because it is a subpath\";\n                continue;\n\n            }\n            // the new path might be a parent path:\n            // erase its children (if any)\n            auto subPathIt = currentTree->subpathIter(canoicalPathLight);\n            while(subPathIt != currentTree->end() ){\n                logDebug << keyName << \"ignore\" << *subPathIt\n                         << \"because it is a subpath\";\n                subPathIt = currentTree->erase(subPathIt);\n            }\n        }\n        currentTree->insert(canoicalPathLight);\n    }\n    return tree;\n}\n\n\n\n\n/// Until shournal 2.1 the version file was located at .cache, then migrated\n/// to .config/shournal\n/// @return QVersionNumer.isNull==true In case of no or an invalid version.\nstatic 
QVersionNumber readLegacyConfigFileVersion(){\n    const QString path = legacyCfgVersionFilePath();\n    QFile f(path);\n    if(! f.open(QFile::OpenModeFlag::ReadOnly)){\n        return {};\n    }    \n    CFlock l(f.handle());\n    l.lockShared();\n    auto ver = QVersionNumber::fromString(QTextStream(&f).readLine());\n    l.unlock(); // unlock explicitly: in case f is removed, it is closed beforehand,\n                // so the fd passed to CFlock is already invalid.\n    if(ver.isNull()){\n        logWarning << QString(\"Bad version string in file %1. Deleting it...\")\n                      .arg(path);\n        f.remove();\n    }\n    return ver;\n}\n\n\n\n/// Remove all paths from excludePaths which are not sub-paths\n/// of any tree in includePathtrees. Print a warning in this case.\nstatic void cleanExcludePaths(const QVector<const PathTree*>& includePathtrees,\n                              std::shared_ptr<PathTree>& excludePaths,\n                              const QString& sectionName){\n    for(auto it=excludePaths->begin(); it != excludePaths->end();){\n        bool isSubPath = false;\n        for(const PathTree* includePaths : includePathtrees){\n            if(includePaths->isSubPath(*it)){\n                isSubPath = true;\n                break;\n            }\n        }\n        if(isSubPath){\n            ++it;\n        } else {\n            logWarning << qtr(\"section %1: ignore exclude-path %2 - it is not a sub-path \"\n                              \"of any include-path\").arg(sectionName).arg((*it).c_str());\n            it = excludePaths->erase(it);\n        }\n    }\n}\n\n// static void cleanExcludePaths(const PathTree& includePaths, PathTree& excludePaths,\n//                                  const QString& sectionName){\n//     cleanExcludePaths( {&includePaths}, excludePaths, sectionName);\n// }\n\nstatic void cleanExcludePaths(const std::shared_ptr<PathTree>& includePaths,\n                              const PathTree* 
optionalIncludePaths,\n                              std::shared_ptr<PathTree>& excludePaths,\n                              const QString& sectionName){\n    if(optionalIncludePaths != nullptr){\n        cleanExcludePaths( {includePaths.get(), optionalIncludePaths}, excludePaths, sectionName);\n    }\n    cleanExcludePaths( {includePaths.get()}, excludePaths, sectionName);\n}\n\nbool Settings::loadSections(const QVersionNumber& parsedCfgVersion){\n    m_cfg.setInitialComments(qtr(\n                                 \"Configuration file for %1. Uncomment lines \"\n                                 \"to change defaults. Multi-line-values (e.g. paths) \"\n                                 \"are framed by leading and trailing \"\n                                 \"triple-quotes ''' .\\n\"\n                                 \"When loading paths, the following symbols may be \"\n                                 \"specified:\\n\"\n                                 \"$HOME or ~ for your home directory\\n\"\n                                 \"$CWD for the current working directory\\n\"\n                                 \"In several sections, the key 'exclude_hidden'\\n\"\n                                 \"can be set to true - in this case, a \"\n                                 \"file event is excluded, if it is below \"\n                                 \"*any* hidden directory or is hidden itself. 
A \"\n                                 \"explicitly included hidden file is not affected.\\n\"\n                                 \"Please do not store custom comments in this file, \"\n                                 \"as those are lost each time shournal is updated to \"\n                                 \"a new version.\").arg(app::SHOURNAL)\n                             );\n    bool updateNeeded = false;\n    updateNeeded |= loadSectWrite(parsedCfgVersion);\n    updateNeeded |= loadSectRead(parsedCfgVersion);\n    loadSectScriptFiles();\n    loadSectIgnoreCmd();\n    loadSectMount();\n    loadSectHash();\n    return updateNeeded;\n}\n\nbool Settings::loadSectWrite(const QVersionNumber& parsedCfgVersion)\n{\n    auto sectWriteEvents = m_cfg[\"File write-events\"];\n    bool updateNeeded = false;\n    sectWriteEvents->setComments(qtr(\n                                     \"Configure, which paths shall be observed for \"\n                                     \"*write*-events. Put each desired path into \"\n                                     \"a separate line. \"\n                                     \"Default is to observe all paths.\\n\"\n                                     ));\n    m_wSettings.excludeHidden = sectWriteEvents->getValue<bool>(\"exclude_hidden\", true);\n    PathTree* hiddenPaths = (m_wSettings.excludeHidden) ? 
m_wSettings.includePathsHidden.get() : nullptr;\n    m_wSettings.includePaths = loadPaths(\n                sectWriteEvents, \"include_paths\", true, {\"/\"}, hiddenPaths);\n    m_wSettings.excludePaths = loadPaths(\n                sectWriteEvents, \"exclude_paths\", true, {});\n    cleanExcludePaths(m_wSettings.includePaths, hiddenPaths, m_wSettings.excludePaths,\n                         sectWriteEvents->sectionName());\n\n    bool insertMaxEventCount;\n    uint32_t maxEventCount;\n    if(parsedCfgVersion < QVersionNumber{2,4}){\n        // Backwards compatibility: old versions did not impose\n        // an event limit\n        updateNeeded = true;\n        logDebug << \"updating cfg-file to\" << QVersionNumber{2,4}.toString();\n        insertMaxEventCount = true;\n        maxEventCount = 0;\n    } else {\n        insertMaxEventCount = false;\n        maxEventCount = 5000;\n    }\n    m_wSettings.maxEventCount = sectWriteEvents->getValue<uint32_t>(\n                \"max_event_count\", maxEventCount, insertMaxEventCount);\n    m_wSettings.maxEventCount = (m_wSettings.maxEventCount == 0) ?\n                                    numeric_limits<uint64_t>::max() :\n                                    m_wSettings.maxEventCount;\n    return updateNeeded;\n}\n\nbool Settings::loadSectRead(const QVersionNumber& parsedCfgVersion)\n{\n    auto sectReadEvents  = m_cfg[SECT_READ_NAME];\n    bool updateNeeded = false;\n    sectReadEvents->setComments(qtr(\n                                   \"Configure, which paths shall be observed for \"\n                                   \"read- or exec-events. Put each desired path into \"\n                                   \"a separate line. 
\"\n                                   \"Per default read file-events are only logged, \"\n                                   \"if you have *also* write permission (assuming other \"\n                                   \"read files are not of interest).\"\n                                   ));\n    m_rSettings.enable = sectReadEvents->getValue<bool>(SECT_READ_KEY_ENABLE, true);\n    m_rSettings.onlyWritable = sectReadEvents->getValue<bool>(\"only_writable\", true);\n    m_rSettings.excludeHidden = sectReadEvents->getValue<bool>(\"exclude_hidden\", true);\n    PathTree* hiddenPaths = (m_rSettings.excludeHidden) ? m_rSettings.includePathsHidden.get() : nullptr;\n    m_rSettings.includePaths = loadPaths(\n                sectReadEvents, SECT_READ_KEY_INCLUDE_PATHS, true, {\"$HOME\"}, hiddenPaths);\n    m_rSettings.excludePaths = loadPaths(\n                sectReadEvents, \"exclude_paths\", true, {});\n    cleanExcludePaths(m_rSettings.includePaths, hiddenPaths, m_rSettings.excludePaths,\n                         sectReadEvents->sectionName());\n\n    bool insertMaxEventCount;\n    uint32_t maxEventCount;\n    if(parsedCfgVersion < QVersionNumber{2,4}){\n        // Backwards compatibility: older versions did not impose\n        // an event limit\n        updateNeeded = true;\n        logDebug << \"updating cfg-file to\" << QVersionNumber{0,9}.toString();\n        insertMaxEventCount = true;\n        maxEventCount = 0;\n    } else {\n        insertMaxEventCount = false;\n        maxEventCount = 5000;\n    }\n    m_rSettings.maxEventCount = sectReadEvents->getValue<uint32_t>(\n                \"max_event_count\", maxEventCount, insertMaxEventCount);\n    m_rSettings.maxEventCount = (m_rSettings.maxEventCount == 0) ?\n                                    numeric_limits<uint64_t>::max() :\n                                    m_rSettings.maxEventCount;\n    return updateNeeded;\n}\n\nvoid Settings::loadSectScriptFiles()\n{\n    auto sectScriptFiles = 
m_cfg[SECT_SCRIPTS_NAME];\n    const QString scriptFiles_OnlyWritableKey = \"only_writable\";\n    sectScriptFiles->setComments(\n                qtr(\"Configure what files (scripts), which were *read* \"\n                    \"by the observed command, shall be stored within \"\n                    \"%1's database.\\n\"\n                    \"The maximal filesize may have units such as KiB, MiB, etc.. \"\n                    \"You can specify file-extensions or mime-types \"\n                    \"(only with the fanotify-backend) to match desired \"\n                    \"file-types, e.g. sh (without leading dot!) or application/x-shellscript. \"\n                    \"The following rules apply: if both are unset, \"\n                    \"accept all file-types (not recommended), if one of the \"\n                    \"two is unset then only the set one is considered, if both \"\n                    \"are set, at least one of the two has to match for the file to \"\n                    \"be stored. \"\n                    \"Note that finding out the mimetype is a lot more \"\n                    \"computationally expensive than the file-extension-method. 
%1 \"\n                    \"can list a mimetype for a given file, see also %1 --help.\"\n                    \"\\n\"\n                    \"Per default only the first N read files matching all given rules \"\n                    \"are saved for each command-sequence (max_count_of_files).\\n\"\n                    \"%2: only store a read file, if you have write- (not only read-) \"\n                    \"permission for it.\\n\"\n                    \"Storing read files is disabled by default.\\n\"\n                    ).arg(app::SHOURNAL, scriptFiles_OnlyWritableKey));\n    m_scriptSettings.enable = sectScriptFiles->getValue<bool>(SECT_SCRIPTS_ENABLE, false);\n    m_scriptSettings.onlyWritable = sectScriptFiles->getValue<bool>(scriptFiles_OnlyWritableKey, true);\n    m_scriptSettings.maxFileSize = sectScriptFiles->getFileSize(\"max_size\", 500*1024) ;\n    m_scriptSettings.maxCountOfFiles = static_cast<int>(sectScriptFiles->getValue<uint>(\n                \"max_count_of_files\", 3));\n    m_scriptSettings.excludeHidden = sectScriptFiles->getValue<bool>(\"exclude_hidden\", true);\n    PathTree* hiddenPaths = (m_scriptSettings.excludeHidden) ?\n                m_scriptSettings.includePathsHidden.get() : nullptr;\n\n    m_scriptSettings.includeExtensions = sectScriptFiles->getValues<StrLightSet>(\n                SECT_SCRIPTS_INCLUDE_FILE_EXTENSIONS, {\"sh\"}, false, \"\\n\");\n    m_scriptSettings.includeMimetypes = sectScriptFiles->getValues<MimeSet>(\n                \"include_mime_types\", {\"application/x-shellscript\"}, false, \"\\n\");\n\n\n\n    m_scriptSettings.includePaths = loadPaths(sectScriptFiles, SECT_SCRIPTS_INCLUDE_PATHS,\n                                              true, {\"/\"}, hiddenPaths);\n    m_scriptSettings.excludePaths = loadPaths(sectScriptFiles, \"exclude_paths\",\n                                                 true, {});\n    cleanExcludePaths(m_scriptSettings.includePaths, hiddenPaths, m_scriptSettings.excludePaths,\n                
         sectScriptFiles->sectionName());\n\n    // make user configurable? If so, make sure not bigger than sizeof(int)/2...\n    m_scriptSettings.flushToDiskTotalSize = 1024 * 1024 * 10;\n}\n\nvoid Settings::loadSectIgnoreCmd()\n{\n    auto sectIgnoreCmd   = m_cfg[\"Ignore-commands\"];\n    m_ignoreCmdsRegardlessOfArgs.clear();\n    m_ignoreCmds.clear();\n\n    const QString sect_ignore_cmds_commands = \"commands\";\n\n    sectIgnoreCmd->setComments(qtr(\n                      \"Only applies to the shell-integration and the\\n\"\n                      \"fanotify backend!\\n\"\n                      \"Exclude specific commands from observation. \"\n                      \"The (optional) path to the commands must not contain whitepaces \"\n                      \"(create a symlink and import that PATH, if necessary). \"\n                      \"You can provide arguments so that a given \"\n                      \"command is only excluded, if it is followed \"\n                      \"by exactly the given arguments (order matters). 
\"\n                      \"Further wildcards (*) are supported, but ONLY \"\n                      \"for commands, so that a command can be excluded \"\n                      \"regardless of its arguments.\\n\\n\"\n                      \"%1 = '''bash\\n\"\n                      \"bash -i\\n\"\n                      \"screen\\n\"\n                      \"mount*'''\\n\").arg(sect_ignore_cmds_commands)\n                              );\n\n    for(const auto & c : defaultIgnoreCmds()){\n        addIgnoreCmd(c, false, sectIgnoreCmd->sectionName());\n    }\n\n    sectIgnoreCmd->setInsertDefaultToComments(false);\n    for(const auto & c : sectIgnoreCmd->getValues<QStringList>(sect_ignore_cmds_commands,\n                                                              QStringList(),\n                                                              false, \"\\n\")) {\n        addIgnoreCmd(c, true, sectIgnoreCmd->sectionName());\n    }\n    sectIgnoreCmd->setInsertDefaultToComments(true);\n}\n\nvoid Settings::loadSectMount()\n{\n    auto sectMount       = m_cfg[\"mounts\"];\n    const QString sect_mount_ignore = \"exclude_paths\";\n\n    sectMount->setComments(qtr(\n                           \"Only applies to the fanotify backend!\"\n                           \"Ignore sub-mount-paths from observation. \"\n                           \"This is typically only needed, if \"\n                           \"you don't have permissions on some \"\n                           \"mounts and want to supress warnings. \"\n                           \"Pseudo-filesytems like /proc are already excluded. 
\"\n                           \"Put each absolute path into a separate line.\\n\"\n                           \"To ignore mounts for which you don't have access permissions, \"\n                           \"set the respective flag to true.\\n\"\n                           )\n                          );\n    m_mountIgnoreNoPerm = sectMount->getValue<bool>(\"ignore_no_permission\", false);\n\n    m_mountIgnorePaths = sectMount->getValues<StrLightSet>(sect_mount_ignore,\n                                                       {},\n                                                       false, \"\\n\");\n\n    std::vector<const char*> defaultMountIgnorePaths = {\"/proc\", \"/sys\", \"/run\",\n                          \"/dev/hugepages\", \"/dev/mqueue\", \"/dev/pts\"};\n    m_mountIgnorePaths.insert(defaultMountIgnorePaths.begin(), defaultMountIgnorePaths.end());\n    if(os::getuid() != 0){\n        m_mountIgnorePaths.insert(\"/root\");\n    }\n}\n\nvoid Settings::loadSectHash()\n{\n    auto sectHash        = m_cfg[\"Hash\"];\n\n    const QString sect_hash_enable = \"enable\";\n    const QString sect_hash_chunksize = \"chunksize\";\n    const QString sect_hash_maxCountReads = \"max-count-reads\";\n\n    sectHash->setComments(qtr(\n                          \"Note: this section includes advanced settings and should not be \"\n                          \"changed at all in most cases and if so, only with a fresh database. \"\n                          \"%1 or %2 should *not* be changed during the lifetime of the database. 
\"\n                          \"Changing it is not a well tested feature and in any case causes overhead \"\n                          \"for hash-based database-queries.\").\n                          arg(sect_hash_chunksize, sect_hash_maxCountReads));\n\n    m_hashSettings.hashEnable = sectHash->getValue<bool>(sect_hash_enable, true, true);\n    // Exclude negative values by using uint\n    m_hashSettings.hashMeta.chunkSize = static_cast<HashMeta::size_type>(\n                sectHash->getValue<uint>(sect_hash_chunksize, 256, true));\n    m_hashSettings.hashMeta.maxCountOfReads = static_cast<HashMeta::size_type>(\n                sectHash->getValue<uint>(sect_hash_maxCountReads, 3, true));\n    if(m_hashSettings.hashEnable){\n        // TODO: also limit maxCountOfReads -> see kernel module\n\n        if(m_hashSettings.hashMeta.chunkSize < 8 ||\n                m_hashSettings.hashMeta.chunkSize > 1024 * 40 ||\n                m_hashSettings.hashMeta.maxCountOfReads < 1){\n            throw ExcCfg(qtr(\"Invalid hashsettings. Must be:\"\n                             \" 8 >= %1 <= 40KiB  and %2 > 1\")\n                         .arg(sect_hash_chunksize, sect_hash_maxCountReads));\n        }\n    }\n}\n\n\nSettings::ReadVersionReturn Settings::readVersion(SafeFileUpdate& verUpd8)\n{\n    ReadVersionReturn ret;\n    verUpd8.read([&ret, &verUpd8]{\n        ret.ver = QVersionNumber::fromString(QTextStream(&verUpd8.file()).readLine());\n    });\n    ret.verFilePath = verUpd8.file().fileName();\n    if(ret.ver.isNull() ){\n        // check legacy version file (migrated...)\n        ret.ver = readLegacyConfigFileVersion();\n        if(ret.ver.isNull()){\n            logInfo << qtr(\"No valid version-file found, although a config-file existed. 
This \"\n                           \"should only happen during the transition from shournal < 2.1 \"\n                           \"to a version >= 2.1.\");\n            ret.ver = app::initialVersion();\n        } else {\n            ret.verFilePath = legacyCfgVersionFilePath();\n        }\n    }\n    return ret;\n}\n\n/// If cached cfg-version is newer than our app's version, throw,\n/// if it is older, migrate sections to new names.\n/// @return: true, if an update was necessary, else false\nbool Settings::updateCfgScheme\n(const QVersionNumber& configSchemeVer, Settings::ReadVersionReturn &readVerResult)\n{\n    if(readVerResult.ver == configSchemeVer){\n        return false;\n    }\n    if(readVerResult.ver > configSchemeVer){\n         throw ExcCfg(qtr(\"The config-file version is greater than the \"\n                          \"scheme version. This most likely happens \"\n                          \"if running shournal's shell integration while \"\n                          \"shournal was updated. In that case \"\n                          \"simply exit the shell session and start it again. \"\n                          \"Otherwise you might have \"\n                          \"downgraded shournal and need to manually correct \"\n                          \"the version-file at %1. 
\"\n                          \"Cached version is %2, current scheme version is %3\")\n                      .arg(readVerResult.verFilePath)\n                      .arg(readVerResult.ver.toString())\n                      .arg(configSchemeVer.toString()));\n    }\n\n    if(readVerResult.ver < QVersionNumber{0,9}){\n        logDebug << \"updating cfg-file to\" << QVersionNumber{0,9}.toString();\n        m_cfg.renameParsedSection(\"Hash\", \"Hash for file write-events\");\n    }\n    if(readVerResult.ver < QVersionNumber{2,1}){\n        logDebug << \"updating cfg-file to\" << QVersionNumber{2,1}.toString();\n        // Because of new section  [File read-events] for read-events for which\n        // no (script-) files shall be stored, rename old [File read-events].\n        // Read files now also support hash, so rename the hash-section (again).\n        m_cfg.renameParsedSection(\"File read-events\", \"File read-events storage settings\");\n        m_cfg.renameParsedSection(\"Hash for file write-events\", \"Hash\");\n    }\n    return true;\n}\n\n/// Store the config to disk. Note that this is only done for new versions,\n/// that's why the version file is also updated alongside.\nvoid Settings::storeCfg\n(const QVersionNumber& configSchemeVer, SafeFileUpdate &cfgUpd8, SafeFileUpdate &verUpd8)\n{\n    cfgUpd8.write([this, &cfgUpd8]{\n        m_cfg.store(cfgUpd8.file());\n    });\n\n    verUpd8.write([&verUpd8, &cfgUpd8, &configSchemeVer]{\n        QTextStream(&verUpd8.file()) << configSchemeVer.toString();\n\n        QFileInfo legacyVersionInfo(legacyCfgVersionFilePath());\n        if(legacyVersionInfo.exists() && ! 
legacyVersionInfo.isSymLink()){\n            // atomically create symlink to new cfg version file (using a temporary\n            // symlink and rename/move that):\n            logInfo << qtr(\"handle legacy config version file: creating symlink to \"\n                           \"new location...\");\n            auto uuid = make_uuid();\n            QByteArray cfgPathBytes = cfgUpd8.file().fileName().toLocal8Bit();\n            QByteArray tmpSymlinkLocation = legacyVersionInfo.absoluteDir().filePath(uuid).toLocal8Bit();\n            os::symlink(cfgPathBytes.constData(), tmpSymlinkLocation.constData());\n            os::rename(tmpSymlinkLocation, legacyVersionInfo.absoluteFilePath().toLocal8Bit());\n        }\n    });\n\n}\n\n/// Parse or create the configuration file at the system's config path\n/// (please perform QCoreApplication::setApplicationName() before).\n/// Another file at config dir provides the version. If the config file version is greater\n/// than out scheme version, throw, if smaller, update the version and possibly\n/// the config-file-scheme as well. Scheme updates\n/// for sections work by directly renaming the sections in loadSections() and\n/// by moving the old to new sections in updateCfgScheme(). Note that\n/// this also works in case of \"redundant\" scheme updates, where over multiple\n/// scheme versions the same section is renamed multiple times. Intermediate\n/// sections are created as necessary and potentially dropped/renamed again\n/// by subsequent scheme updates.\n/// @throws ExcCfg\nvoid Settings::load()\n{\n    const auto cfgPath = cfgFilepath();\n\n    const QString cfgDir(splitAbsPath(cfgPath).first);\n    QDir dir;\n    if(! 
dir.mkpath(cfgDir) ){\n        throw QExcIo(qtr(\"Failed to create configuration directory at %1\")\n                                 .arg(cfgDir) );\n    }\n\n    SafeFileUpdate cfgUpd8(cfgPath);\n    bool cfgFileExisted = cfgUpd8.read([this, &cfgUpd8]{\n        m_cfg.parse(cfgUpd8.file());\n    });\n\n    // Until shournal v3.2 the config version was always set to the application version.\n    // This required a synchronized update of all machines sharing the same config dir.\n    // Therefore, only update the config version if a scheme update is necessary.\n    const auto configSchemeVer = QVersionNumber{3, 2};\n    auto parsedCfgVersion = configSchemeVer;\n    SafeFileUpdate verUpd8(pathJoinFilename(cfgDir, QString(\".config-version\")));\n    bool cfgVersionNeedsUpdate = false;\n    if(cfgFileExisted){\n        // do we need a version update?\n        auto readVerRet = readVersion(verUpd8);\n        cfgVersionNeedsUpdate = updateCfgScheme(configSchemeVer, readVerRet);\n        parsedCfgVersion = readVerRet.ver;\n    }\n    try {\n        cfgVersionNeedsUpdate |= loadSections(parsedCfgVersion);\n\n        auto notReadKeys = m_cfg.generateNonReadSectionKeyPairs();\n        if(! notReadKeys.isEmpty()){\n            throw ExcCfg(qtr(\"Unexpected key in section [%1] - '%2'\")\n                          .arg(notReadKeys.first().first,\n                               *notReadKeys.first().second.begin()));\n        }\n        // Only write configuration to disk, if there was no such file\n        // or we are using a new scheme for the first time\n        if(! cfgFileExisted || cfgVersionNeedsUpdate){\n            logDebug << \"about to update config at\" << cfgPath;\n            storeCfg(configSchemeVer, cfgUpd8, verUpd8);\n        }\n    } catch(ExcCfg & ex) {\n        ex.setDescrip(ex.descrip() + qtr(\". 
The config file resides at %1\").arg(cfgPath));\n        throw;\n    }\n\n    m_settingsLoaded = true;\n}\n\n/// Select which backend to choose based on local/global\n/// config files and availability (search $PATH).\n/// @return app::SHOURNAL_RUN, app::SHOURNAL_RUN_FANOTIFY or an\n/// empty string if not found.\nQString Settings::chooseShournalRunBackend()\n{\n    auto appname = QCoreApplication::applicationName();\n    const QString localPath(cfgAppDir() + \"/backend\");\n    const QString globalPath = \"/etc/shournal.d/backend\";\n    QString selectedPath;\n\n    if(QFile::exists(localPath)){\n        selectedPath = localPath;\n    } else if(QFile::exists(globalPath)){\n        selectedPath = globalPath;\n    }\n\n    if(! selectedPath.isEmpty()){\n        // load backend from file\n        QFileThrow backendCfgFile(selectedPath);\n        backendCfgFile.open(QFile::OpenModeFlag::ReadOnly);\n        QTextStream s(&backendCfgFile);\n        auto backendStr =  s.readLine();\n        if(backendStr == \"ko\") {\n            return app::SHOURNAL_RUN;\n        }\n        if(backendStr == \"fanotify\") {\n            return app::SHOURNAL_RUN_FANOTIFY;\n        }\n        logWarning << qtr(\"Invalid backend %1 at file %2 - \"\n                          \"supported options: [fanotify, ko]. \"\n                          \"Using defaults...\").arg(backendStr, selectedPath);\n    }\n\n   if( ! QStandardPaths::findExecutable(app::SHOURNAL_RUN).isEmpty())\n       return app::SHOURNAL_RUN;\n\n   if( ! 
QStandardPaths::findExecutable(app::SHOURNAL_RUN_FANOTIFY).isEmpty())\n       return app::SHOURNAL_RUN_FANOTIFY;\n\n   return {};\n}\n\n\nconst Settings::StrLightSet &Settings::getMountIgnorePaths()\n{\n    assert(m_settingsLoaded);\n    return m_mountIgnorePaths;\n}\n\nbool Settings::getMountIgnoreNoPerm() const\n{\n    return m_mountIgnoreNoPerm;\n}\n\n\n\nconst Settings::StringSet &Settings::ignoreCmds()\n{\n    return m_ignoreCmds;\n}\n\nconst Settings::StringSet &Settings::ignoreCmdsRegardslessOfArgs()\n{\n    return m_ignoreCmdsRegardlessOfArgs;\n}\n\n\nconst Settings::WriteFileSettings &Settings::writeFileSettings() const\n{\n    return m_wSettings;\n}\n\nconst Settings::ReadFileSettings &Settings::readFileSettings() const\n{\n    return m_rSettings;\n}\n\nconst Settings::ScriptFileSettings &Settings::readEventScriptSettings() const\n{\n    return m_scriptSettings;\n}\n\nconst Settings::HashSettings &Settings::hashSettings() const\n{\n    return m_hashSettings;\n}\n\n\n\n\n\n\n\n\n"
  },
  {
    "path": "src/common/settings.h",
    "content": "\n#pragma once\n\n#include <QString>\n#include <unordered_set>\n#include <QVersionNumber>\n#include <memory>\n\n#include \"hashmeta.h\"\n#include \"pathtree.h\"\n#include \"cfg.h\"\n#include \"qfilethrow.h\"\n\nusing std::make_shared;\n\nclass SafeFileUpdate;\n\nclass Settings {\npublic:\n    typedef std::unordered_set<std::string> StringSet;\n    typedef std::unordered_set<StrLight> StrLightSet;\n    typedef std::unordered_set<QString> MimeSet;\n\n    static Settings & instance();\n\n    struct HashSettings {\n        HashMeta hashMeta;\n        bool hashEnable{};\n    };\n\n    struct WriteFileSettings {\n        WriteFileSettings() :\n            includePaths(make_shared<PathTree>()),\n            includePathsHidden(make_shared<PathTree>()),\n            excludePaths(make_shared<PathTree>()) {}\n\n        std::shared_ptr<PathTree> includePaths;\n        std::shared_ptr<PathTree> includePathsHidden;\n        std::shared_ptr<PathTree> excludePaths;\n        bool excludeHidden {true};\n        uint64_t maxEventCount{std::numeric_limits<uint64_t>::max()};\n\n        Q_DISABLE_COPY(WriteFileSettings)\n        DISABLE_MOVE(WriteFileSettings)\n    };\n\n    struct ReadFileSettings {\n        ReadFileSettings() :\n        includePaths(make_shared<PathTree>()),\n        includePathsHidden(make_shared<PathTree>()),\n        excludePaths(make_shared<PathTree>()) {}\n\n        bool enable {true};\n        std::shared_ptr<PathTree> includePaths;\n        std::shared_ptr<PathTree> includePathsHidden;\n        std::shared_ptr<PathTree> excludePaths;\n        bool onlyWritable {true};\n        bool excludeHidden {true};\n        uint64_t maxEventCount{std::numeric_limits<uint64_t>::max()};\n\n        Q_DISABLE_COPY(ReadFileSettings)\n        DISABLE_MOVE(ReadFileSettings)\n    };\n\n    /// Holds settings for read files which shall be stored to disk\n    /// ( probably mostly scripts or similar files).\n    struct ScriptFileSettings {\n        
ScriptFileSettings() :\n            includePaths(make_shared<PathTree>()),\n            includePathsHidden(make_shared<PathTree>()),\n            excludePaths(make_shared<PathTree>()) {}\n\n        // store read files to disk, if...\n        bool enable {false}; // .. enabled\n        bool onlyWritable {true}; // .. user has write permission\n        bool excludeHidden {true}; // .. it is not hidden\n        std::shared_ptr<PathTree> includePaths; // .. it is equal to or below an include path\n        std::shared_ptr<PathTree> includePathsHidden; // .. see above\n        std::shared_ptr<PathTree> excludePaths; // .. it is not equal to or below an exclude path\n        StrLightSet includeExtensions; // .. file extension, mimetype matches ( it's\n        MimeSet includeMimetypes;    //   more complicated than that)\n        qint64 maxFileSize {500*1024}; // .. it's not bigger than this size\n        uint maxCountOfFiles {3}; // .. we have not already collected that many read files\n\n        int flushToDiskTotalSize {1024*1024*10}; // read files (scripts) are cached in memory. 
If their total size is\n                                  // greater than that, flush to disk (database)\n\n        Q_DISABLE_COPY(ScriptFileSettings)\n        DISABLE_MOVE(ScriptFileSettings)\n    };\n\n\n\npublic:\n    void setUserCfgDir(const QString& p);\n    void setUserDataDir(const QString& p);\n    void load();\n    QString chooseShournalRunBackend();\n\n    const HashSettings& hashSettings() const;\n    const WriteFileSettings& writeFileSettings() const;\n    const ReadFileSettings& readFileSettings() const;\n    const ScriptFileSettings& readEventScriptSettings() const;\n\n    QString cfgAppDir();\n    QString cfgFilepath();\n    QString dataDir();\n\n    const QStringList& defaultIgnoreCmds();\n\n    const StringSet& ignoreCmds();\n    const StringSet& ignoreCmdsRegardslessOfArgs();\n\n    const StrLightSet &getMountIgnorePaths();\n    bool getMountIgnoreNoPerm() const;\n\npublic:\n    ~Settings() = default;\n    Q_DISABLE_COPY(Settings)\n    DISABLE_MOVE(Settings)\n\npublic:\n    static const char* SECT_READ_NAME;\n    static const char* SECT_READ_KEY_ENABLE;\n    static const char* SECT_READ_KEY_INCLUDE_PATHS;\n\n    static const char* SECT_SCRIPTS_NAME;\n    static const char* SECT_SCRIPTS_ENABLE;\n    static const char* SECT_SCRIPTS_INCLUDE_PATHS;\n    static const char* SECT_SCRIPTS_INCLUDE_FILE_EXTENSIONS;\n\nprivate:\n    struct ReadVersionReturn {\n        QVersionNumber ver;\n        QString verFilePath;\n    };\n\n    Settings() = default;\n    void addIgnoreCmd(QString cmd, bool warnIfNotFound, const QString & ignoreCmdsSectName);\n    bool loadSections(const QVersionNumber& parsedCfgVersion);\n    bool loadSectWrite(const QVersionNumber& parsedCfgVersion);\n    bool loadSectRead(const QVersionNumber& parsedCfgVersion);\n    void loadSectScriptFiles();\n    void loadSectIgnoreCmd();\n    void loadSectMount();\n    void loadSectHash();\n\n    ReadVersionReturn readVersion(SafeFileUpdate &verUpd8);\n    bool updateCfgScheme(const 
QVersionNumber&, ReadVersionReturn&);\n    void storeCfg(const QVersionNumber &configSchemeVer, SafeFileUpdate& cfgUpd8,\n                  SafeFileUpdate& verUpd8);\n    std::shared_ptr<PathTree> loadPaths(qsimplecfg::Cfg::Section_Ptr& section,\n              const QString& keyName, bool eraseSubpaths,\n              const std::unordered_set<QString> & defaultValues,\n              PathTree* hiddenPaths=nullptr);\n\n    qsimplecfg::Cfg m_cfg;\n    HashSettings m_hashSettings;\n    WriteFileSettings m_wSettings;\n    ReadFileSettings m_rSettings;\n    ScriptFileSettings m_scriptSettings;\n    StrLightSet m_mountIgnorePaths;\n    bool m_mountIgnoreNoPerm {false};\n    bool m_settingsLoaded {false};\n    StringSet m_ignoreCmds;\n    StringSet m_ignoreCmdsRegardlessOfArgs;\n    const QString m_userHome { QDir::homePath() };\n    const QString m_workingDir { QDir::currentPath() };\n    QString m_userCfgDir;\n    QString m_userDataDir;\n\nprivate:\n    // unit testing...\n    friend class FileEventHandlerTest;\n    friend class IntegrationTestShell;\n    friend class GeneralTest;\n};\n\n\n"
  },
  {
    "path": "src/common/shournal_run_common.cpp",
    "content": "#include \"shournal_run_common.h\"\n\n#include \"app.h\"\n#include \"conversions.h\"\n#include \"qoutstream.h\"\n#include \"translation.h\"\n\nvoid shournal_run_common::print_summary(uint64_t n_wEvents, uint64_t n_rEvents,\n                   uint64_t n_lostEvents,\n                   uint64_t n_storedEvents,\n                   uint64_t targetFileSize){\n    QErr() << qtr(\"=== %1 summary ===\\n\"\n                  \"number of write-events: %2\\n\"\n                  \"number of read-events: %3\\n\"\n                  \"number of lost events: %4\\n\"\n                  \"number of stored read files: %5\\n\"\n                  \"size of tmp-file: %6\\n\")\n              .arg(app::CURRENT_NAME)\n              .arg(n_wEvents)\n              .arg(n_rEvents)\n              .arg(n_lostEvents)\n              .arg(n_storedEvents)\n              .arg(Conversions().bytesToHuman(targetFileSize));\n}\n\nQOptArg shournal_run_common::mkarg_cfgdir()\n{\n    return QOptArg(\"\", \"cfg-dir\",\n                   qtr(\"Override the path to shournal's configuration directory.\"));\n}\n\nQOptArg shournal_run_common::mkarg_datadir()\n{\n    return QOptArg(\"\", \"data-dir\",\n                   qtr(\"Override the path to shournal's data directory.\"));\n}\n"
  },
  {
    "path": "src/common/shournal_run_common.h",
    "content": "#pragma once\n\n#include <stdint.h>\n\n#include \"util.h\"\n#include \"qoptarg.h\"\n\nnamespace shournal_run_common {\n\nvoid print_summary(uint64_t n_wEvents, uint64_t n_rEvents,\n                   uint64_t n_lostEvents,\n                   uint64_t n_storedEvents,\n                   uint64_t targetFileSize);\n\nQOptArg mkarg_cfgdir();\nQOptArg mkarg_datadir();\n\n} // namespace shournal_run_common\n\n\n"
  },
  {
    "path": "src/common/socket_message.cpp",
    "content": "\n\n#include \"socket_message.h\"\n\n\n\nconst char* socket_message::socketMsgToStr(E_SocketMsg msg){\n    switch (msg) {\n    case E_SocketMsg::SETUP_DONE: return \"SETUP_DONE\";\n    case E_SocketMsg::SETUP_FAIL: return \"SETUP_FAIL\";\n    case E_SocketMsg::CLEAR_EVENTS: return \"CLEAR_EVENTS\";\n    case E_SocketMsg::COMMAND: return \"COMMAND\";\n    case E_SocketMsg::RETURN_VALUE: return \"RETURN_VALUE\";\n    case E_SocketMsg::EMPTY: return \"EMPTY\";\n    case E_SocketMsg::LOG_MESSAGE: return \"LOG_MESSAGE\";\n    case E_SocketMsg::CMD_START_DATETIME: return \"CMD_START_DATETIME\";\n    case E_SocketMsg::ENUM_END: return \"ENUM_END\";\n    }\n    return \"UNHANDLED ENUM CASE\";\n}\n"
  },
  {
    "path": "src/common/socket_message.h",
    "content": "#pragma once\n\n\nnamespace socket_message {\n\n/// Messages send from shell observation to shournal process or vice versa\nenum class E_SocketMsg { SETUP_DONE, SETUP_FAIL, CLEAR_EVENTS,\n                         COMMAND, RETURN_VALUE, EMPTY,\n                         LOG_MESSAGE, CMD_START_DATETIME, ENUM_END };\n\nconst char* socketMsgToStr(E_SocketMsg msg);\n\n\n}\n\n"
  },
  {
    "path": "src/common/stdiocpp.cpp",
    "content": "\n#include <stdio.h>\n#include <errno.h>\n#include <stdlib.h>\n#include <fcntl.h>\n#include <unistd.h>\n\n#include \"stdiocpp.h\"\n#include \"util.h\"\n#include \"translation.h\"\n#include \"os.h\"\n#include \"osutil.h\"\n\n\nstdiocpp::QExcStdio::QExcStdio\n(QString text, const FILE *file, bool collectErrno, bool collectStacktrace) :\n    QExcCommon(text, false)\n{\n    m_descrip += (file == nullptr) ? \"\"\n                                   : \" - flags: \" + QString::number(file->_flags);\n    if(collectErrno){\n        m_descrip += \" (\" + QString::number(errno) +\n                \"): \" + translation::strerror_l(errno);\n    }\n\n    if(collectStacktrace){\n        appendStacktraceToDescrip();\n    }\n\n}\n\n/// Create an unnamed temp-file respecting\n/// env-variable TMPDIR (other than the canonical\n/// tmpfile(3)).\nFILE *stdiocpp::tmpfile(int o_flags __attribute__ ((unused)))\n{\n    int fd = -1;\n    try {\n        fd = osutil::unnamed_tmp();\n        return stdiocpp::fdopen(fd, \"w+\");\n    } catch (const os::ExcOs& ex) {\n        throw QExcStdio(ex.what(), nullptr, true);\n    } catch (const QExcStdio&) {\n        if(fd != -1){\n            close(fd);\n        }\n        throw ;\n    }\n}\n\nFILE *stdiocpp::fopen(const char *pathname, const char *mode)\n{\n    FILE* f = ::fopen(pathname, mode);\n    if(f == nullptr ){\n        throw QExcStdio(QString(\"Cannot open %1 with mode %2: \")\n                        .arg(pathname, mode), nullptr, true);\n    }\n    return f;\n}\n\nFILE *stdiocpp::fdopen(int fd, const char *mode){\n    FILE* f = ::fdopen(fd, mode);\n    if(f == nullptr ){\n        throw QExcStdio(QString(\"Cannot open fd with mode %2: \")\n                        .arg(mode), nullptr, true);\n    }\n    return f;\n}\n\n\nvoid stdiocpp::fclose(FILE *stream)\n{\n    if(::fclose(stream) != 0){\n        throw QExcStdio(\"fclose failed: \", nullptr, true);\n    }\n}\n\n\nint stdiocpp::fgetc_unlocked(FILE *stream)\n{\n    return 
::fgetc_unlocked(stream);\n}\n\n\nsize_t stdiocpp::fwrite_unlocked(const void *ptr, size_t size, size_t n_items, FILE *stream)\n{\n    size_t items_written = ::fwrite_unlocked(ptr , size, n_items, stream);\n    if( items_written != n_items){\n       throw QExcStdio(QString(\"fwrite_unlocked failed (only %1 of %2 items written): \")\n                       .arg(items_written).arg(n_items),\n                       stream);\n    }\n    return items_written;\n}\n\n\nvoid stdiocpp::fflush(FILE *stream)\n{\n    if(::fflush(stream) != 0){\n        throw QExcStdio(\"fflush failed: \", nullptr, true);\n    }\n}\n\n\nsize_t stdiocpp::fread_unlocked(void *ptr, size_t size, size_t n, FILE *stream)\n{\n    return ::fread_unlocked(ptr, size, n, stream);\n}\n\n\nint stdiocpp::fseek(FILE *stream, long offset, int whence)\n{\n    int new_offset = ::fseek(stream, offset, whence);\n    if(new_offset == -1){\n        throw QExcStdio(\"fseek failed: \", stream, true);\n    }\n    return new_offset;\n}\n\nlong int stdiocpp::ftell(FILE *stream){\n    long int offset = ::ftell(stream);\n    if(offset == -1){\n        throw QExcStdio(\"ftell failed: \", stream, true);\n    }\n    return offset;\n}\n\n\n\n/// Warning: not threadsafe.\n/// stdio.h does not provide such a functionality, so we must take care\n/// that buffer is flushed before using the raw OS-ftruncate\nvoid stdiocpp::ftruncate_unlocked(FILE *stream)\n{\n    stdiocpp::fflush(stream);\n    stdiocpp::fseek(stream, 0, SEEK_SET);\n    if(::ftruncate(fileno(stream), 0) == -1) {\n        throw QExcStdio(\"POSIX ftruncate failed\", stream, true);\n    }\n}\n\n\n\n\n"
  },
  {
    "path": "src/common/stdiocpp.h",
    "content": "#pragma once\n\n#include \"exccommon.h\"\n\n/// Very thin wrappers around some stdio functions\n/// with exceptions, etc.\nnamespace stdiocpp {\n\nclass QExcStdio : public QExcCommon\n{\npublic:\n    explicit QExcStdio(QString text,\n                       const FILE* file, bool collectErrno=false,\n                       bool collectStacktrace=true);\n};\n\nFILE* tmpfile(int o_flags=0);\nFILE *fopen(const char *pathname, const char *mode);\nFILE *fdopen(int fd, const char *mode);\nvoid fclose(FILE *stream);\nint fgetc_unlocked(FILE *stream);\n\nsize_t fwrite_unlocked(const void *ptr, size_t size, size_t n_items,\n                              FILE *stream);\nvoid fflush(FILE *stream);\nsize_t fread_unlocked(void *ptr, size_t size, size_t n,\n                             FILE *stream);\nint fseek(FILE *stream, long offset, int whence);\n\nlong int ftell(FILE *stream);\nvoid ftruncate_unlocked(FILE* stream);\n\n} // namespace stdiocpp\n\n\n\n\n"
  },
  {
    "path": "src/common/stupidinject.cpp",
    "content": "#include \"stupidinject.h\"\n\n\nvoid StupidInject::addInjection(const StupidInject::Action& action)\n{\n    m_actions.push_back(action);\n}\n\nvoid StupidInject::addInjection(const char *trigger, const char *replacement)\n{\n    Action act;\n    act.trigger = trigger;\n    act.func = [replacement](QTextStream &out){\n        out << replacement;\n    };\n    m_actions.push_back(act);\n}\n\nvoid StupidInject::addInjection(const char *trigger, const std::function<void (QTextStream &)>& func)\n{\n    Action act;\n    act.trigger = trigger;\n    act.func = func;\n    m_actions.push_back(act);\n}\n\n\nvoid StupidInject::stream(const char *input, QTextStream &out)\n{\n    const char* lastBegin = input;\n    for(const auto& action : m_actions){\n        const char *triggerInInput = strstr(lastBegin, action.trigger.constData());\n        if(triggerInInput == nullptr){\n            throw EqcInjectTriggerNotFound(\"Trigger not found: \" + action.trigger);\n        }\n        // length of the string that has yet to be written before injecting\n        auto lastInputLenght = triggerInInput - lastBegin;\n        const auto lastInput = QByteArray::fromRawData(lastBegin, int(lastInputLenght));\n        out << lastInput;\n        action.func(out);\n        lastBegin = triggerInInput + action.trigger.size();\n    }\n    // the rest still needs to be written\n    out << lastBegin;\n}\n"
  },
  {
    "path": "src/common/stupidinject.h",
    "content": "#pragma once\n\n#include <functional>\n\n#include <QTextStream>\n#include <QVector>\n\n#include \"exccommon.h\"\n\nclass EqcInjectTriggerNotFound : public QExcCommon\n{\npublic:\n    using QExcCommon::QExcCommon;\n};\n\n\n/// \"Inject\" arbitrary content into a text-stream. The actions\n/// have to be added in the order of their later occurence\n/// within the input-stream.\nclass StupidInject\n{\npublic:\n    struct Action {\n        std::function< void(QTextStream &out)> func;\n        QByteArray trigger;\n\n    };\n\n    void addInjection(const Action& action);\n    void addInjection(const char* trigger, const char* replacement);\n    void addInjection(const char* trigger,const std::function< void(QTextStream &out)>& func);\n\n    void stream(const char* input, QTextStream& out);\n\nprivate:\n    QVector<Action> m_actions;\n};\n\n"
  },
  {
    "path": "src/common/subprocess.cpp",
    "content": "\n#include <unistd.h>\n#include <wait.h>\n#include <cstdio>\n#include <iostream>\n#include <cstring>\n#include <cassert>\n#include <string>\n#include <fcntl.h>\n#include <linux/limits.h>\n\n#include \"subprocess.h\"\n#include \"os.h\"\n#include \"osutil.h\"\n#include \"excos.h\"\n#include \"util.h\"\n#include \"cleanupresource.h\"\n#include \"fdentries.h\"\n\nusing osutil::closeVerbose;\n\nenum class LaunchMsgType { PID, EXCEPTION, ENUM_END };\n\nstruct LaunchMsg{\n    LaunchMsgType msgType;\n    int errorNumber; //errno\n    pid_t pid;\n};\nstatic_assert (sizeof (LaunchMsg) <= PIPE_BUF, \"LaunchMsg is too big...\" );\n\nstatic_assert (std::is_pod<LaunchMsg>(), \"\");\n\n\nnamespace  {\n\n\n/// @throws ExcOs\nbool readMsg(int fd, LaunchMsg* msg){\n    ssize_t readN = os::read(fd, msg, sizeof (LaunchMsg));\n    if(readN == 0){\n        return false;\n    }\n    if(readN != sizeof (LaunchMsg)){\n        // should never happen\n        throw os::ExcOs(\"Failed to launch external process, \"\n                        \"received invalid message from child.\",\n                        EINVAL);\n    }\n    return true;\n}\n\n[[noreturn]]\nvoid throwFailedToLaunchEx(char *const argv[],\n                                  const LaunchMsg & msg,\n                                  const std::string& descrip=\"\"){\n    std::string desc_;\n    if(descrip.empty()){\n       desc_ = descrip;\n    } else {\n        desc_ = descrip + \" - \";\n    }\n    throw os::ExcOs(\"Failed to launch external process <\"\n                    + argvToStr(argv) + \"> -\" + desc_,\n                    msg.errorNumber);\n}\n\n\n/// convert to null-terminated vector which will be passed as argv.\nstd::vector<char*> toPointerVect(const subprocess::Args_t& args){\n    std::vector<char*> pointerVec(args.size() + 1 ); // + 1 because of terminating NULL\n    for(unsigned i = 0; i < args.size() ; ++i) {\n        pointerVec[i] = const_cast<char*>(args[i].c_str());\n    }\n    
pointerVec.back() = nullptr;\n    return pointerVec;\n}\n\n} // namespace\n\nsubprocess::Subprocess::Subprocess() :\n    m_lastPid(std::numeric_limits<pid_t>::max()),\n    m_asRealUser(false),\n    m_forwardAllFds(false),\n    m_lastCallWasDetached(false),\n    m_environ(nullptr),\n    m_inNewSid(false),\n    m_waitForSetup(true),\n    m_lastCallWaitedForSetup(false)\n{}\n\nvoid subprocess::Subprocess::call(char *const argv[],\n                                  bool forwardStdin, bool forwardStdout, bool forwardStderr)\n{\n    this->call(argv[0], argv, forwardStdin, forwardStdout, forwardStderr);\n}\n\n\nvoid subprocess::Subprocess::call(const Args_t &args, bool forwardStdin,\n                                  bool forwardStdout, bool forwardStderr)\n{\n\n    this->call(toPointerVect(args).data(), forwardStdin, forwardStdout, forwardStderr);\n}\n\n/// Call provided program after fork. Note that per default all file descriptors\n/// except stdin, stdout and stderr are closed.\n/// @throw ExcOs\nvoid subprocess::Subprocess::call(const char *filename,\n                                  char * const argv[], bool forwardStdin,\n                                  bool forwardStdout, bool forwardStderr)\n{\n    doCall(filename, argv, forwardStdin, forwardStdout, forwardStderr,\n           false);\n}\n\n/// Call provided program after double-fork (daemonize).\n/// Waits, until grandchild-process. 
Note that per default all file descriptors\n/// except stdout and stderr are closed.\n/// @throw ExcOs\nvoid subprocess::Subprocess::callDetached(const char *filename, char * const argv[],\n                                          bool forwardStdin, bool forwardStdout,\n                                          bool forwardStderr)\n{\n   doCall(filename, argv, forwardStdin, forwardStdout, forwardStderr,\n          true);\n}\n\n\nvoid subprocess::Subprocess::callDetached(char * const argv[], bool forwardStdin,\n                                          bool forwardStdout, bool forwardStderr)\n{\n   callDetached(argv[0], argv, forwardStdin, forwardStdout, forwardStderr);\n}\n\n\nvoid subprocess::Subprocess::callDetached(const Args_t &args, bool forwardStdin,\n                                          bool forwardStdout, bool forwardStderr)\n{\n    this->callDetached(toPointerVect(args).data(), forwardStdin, forwardStdout, forwardStderr);\n}\n\n\nvoid subprocess::Subprocess::\ndoCall(const char *filename, char * const argv[],\n       bool forwardStdin, bool forwardStdout,\n       bool forwardStderr, bool detached)\n{\n    m_lastCallWasDetached = detached;\n    m_lastCallWaitedForSetup = m_waitForSetup;\n    if(m_waitForSetup){\n        doCallWaitForSetup(filename, argv, forwardStdin, forwardStdout, forwardStderr,\n                   detached);\n    } else {\n        os::Pipes_t dummyPipe {-1, -1};\n        doFork(filename, argv, forwardStdin, forwardStdout, forwardStderr,\n               detached, dummyPipe);\n    }\n}\n\n/// Create a pipe and wait until the child-process has closed it's\n/// write-end, either by calling execve or on error.\n/// In case of a detached call, the pid of the grandchild-process\n/// is also send via the pipe and made available via m_lastPid.\nvoid subprocess::Subprocess::\ndoCallWaitForSetup(const char *filename, char * const argv[],\n           bool forwardStdin, bool forwardStdout,\n           bool forwardStderr, bool detached){\n    
auto startPipe = os::pipe( O_CLOEXEC  | O_DIRECT );\n    auto closeStartRead = finally([&startPipe] {\n        closeVerbose(startPipe[0]);\n    });\n    auto closeStartWrite = finally([&startPipe] {\n        closeVerbose(startPipe[1]);\n    });\n\n    doFork(filename, argv, forwardStdin, forwardStdout, forwardStderr,\n           detached, startPipe);\n\n    closeVerbose(startPipe[1]);\n    closeStartWrite.setEnabled(false);\n    LaunchMsg msg;\n\n    if(! detached){\n        if(!readMsg(startPipe[0], &msg)){\n            // no error\n            return;\n        }\n        assert(msg.msgType == LaunchMsgType::EXCEPTION);\n        throwFailedToLaunchEx(argv, msg);\n    }\n\n    // if detached we need the grandchild-pid\n\n    if(!readMsg(startPipe[0], &msg)){\n        // first message *must* be pid\n       throwFailedToLaunchEx(argv, msg, \"Missing pid reply from grandchild\");\n    }\n\n    switch (msg.msgType) {\n    case LaunchMsgType::PID:\n        // normal case: pid of grandchild\n        m_lastPid = msg.pid;\n        break;\n    case LaunchMsgType::EXCEPTION:\n       throwFailedToLaunchEx(argv, msg);\n    default:\n        assert(false);\n        throwFailedToLaunchEx(argv, msg,\n                               \" Bad response from grandchild: \"\n                              + std::to_string(int(msg.msgType)));\n\n    }\n    // second reply (if any) must be an exception always\n    if(!readMsg(startPipe[0], &msg)){\n        return;\n    }\n    assert(msg.msgType == LaunchMsgType::EXCEPTION);\n    throwFailedToLaunchEx(argv, msg);\n}\n\n\nvoid subprocess::Subprocess::\ndoFork(const char *filename, char * const argv[],\n           bool forwardStdin, bool forwardStdout,\n           bool forwardStderr, bool detached,\n           os::Pipes_t &startPipe){\n\n    m_lastPid = os::fork();\n    if(m_lastPid == 0){\n        if(m_inNewSid)\n            os::setsid();\n\n        if(detached){\n            // child: fork again and exit\n            try {\n                
pid_t pid2 = os::fork();\n                if(pid2 == 0){\n                    handleChild(filename, argv, startPipe,\n                                forwardStdin, forwardStdout, forwardStderr);\n                }\n            } catch (const os::ExcOs& ex){\n                // should never happen\n                std::cerr << __func__ << \": \" << ex.what() << \"\\n\";\n                exit(1);\n            }\n            exit(0);\n        } else {\n            handleChild(filename, argv, startPipe,\n                        forwardStdin, forwardStdout, forwardStderr);\n        }\n    }\n\n}\n\n\n/// Wait for the subprocess to finish. Does *not* work\n/// for detached process\n/// @return the exit value of the process\n/// @throws ExcOs, ExcProcessExitNotNormal\nint subprocess::Subprocess::waitFinish()\n{\n    if(m_lastCallWasDetached){\n        throw os::ExcOs(\"Attempted to wait for child process, \"\n                        \"although last call was <detached>\", 0);\n    }\n    int child_status = 1;\n    os::waitpid (m_lastPid, &child_status) ;\n    return child_status;\n}\n\nvoid subprocess::Subprocess::setAsRealUser(bool val)\n{\n    m_asRealUser = val;\n}\n\n/// Per default all file-descriptors are closed, except\n/// for \"nomal\" call:\n///         stdin, stdout and stderr.\n/// for detached call:\n///         stdout and stderr\n/// With this method you override the default.\nvoid subprocess::Subprocess::setForwardFdsOnExec(const std::unordered_set<int> &forwardFds)\n{\n    m_forwardFds = forwardFds;\n}\n\nvoid subprocess::Subprocess::setForwardAllFds(bool val)\n{\n    m_forwardAllFds = val;\n}\n\nvoid subprocess::Subprocess::setInNewSid(bool val)\n{\n    m_inNewSid = val;\n}\n\n/// Wait for child-process-setup on call or callDetached.\nvoid subprocess::Subprocess::setWaitForSetup(bool waitForSetup)\n{\n    m_waitForSetup = waitForSetup;\n}\n\nvoid subprocess::Subprocess::closeAllButForwardFds(os::Pipes_t &startPipe)\n{\n    // startpipe fds have O_CLOEXEC 
set, if exec fails, the respond is sent via\n    // them, so do not close here.\n\n    for(const int fd : osutil::FdEntries()){\n        if(fd <= 2){\n            // stdin, -out and -err are handeled separately\n            continue;\n        }\n        if(m_forwardFds.find(fd) == m_forwardFds.end() &&\n                fd != startPipe[0] && fd != startPipe[1])\n            // not in white-list, close\n            closeVerbose(fd);\n    }\n}\n\nvoid subprocess::Subprocess::handleChild(const char *filename, char *const argv[],\n                                         os::Pipes_t &startPipe, bool forwardStdin,\n                                         bool forwardStdout, bool forwardStderr)\n{    \n    try {\n        if(m_callbackAsChild)\n            m_callbackAsChild();\n\n        if(m_asRealUser){\n            os::setgid(os::getgid());\n            os::setuid(os::getuid());\n        }\n        if(startPipe[0] != -1 && m_lastCallWasDetached){\n            LaunchMsg msg{};\n            msg.msgType = LaunchMsgType::PID;\n            msg.pid = getpid();\n            os::write(startPipe[1], &msg, sizeof (LaunchMsg));\n        }        \n        if(! m_forwardAllFds){\n            if(! forwardStdin) closeVerbose(STDIN_FILENO);\n            if(! forwardStdout) closeVerbose(STDOUT_FILENO);\n            if(! 
forwardStderr) closeVerbose(STDERR_FILENO);\n            closeAllButForwardFds(startPipe);\n        }\n        os::exec(filename, argv, m_environ);\n    } catch (const os::ExcOs& ex) {\n        if(startPipe[0] == -1){\n            std::cerr << \"Failed to launch subprocess: \"\n                      << ex.what() << \"\\n\";\n            exit(ex.errorNumber());\n        }\n        LaunchMsg msg{};\n        msg.msgType = LaunchMsgType::EXCEPTION;\n        msg.errorNumber = ex.errorNumber();\n        try {\n            os::write(startPipe[1], &msg, sizeof (LaunchMsg));\n        } catch (const os::ExcOs& ex) {\n            // should never happen\n            std::cerr << __func__ << \": \" << ex.what() << \"\\n\";\n        }\n        exit(ex.errorNumber());\n    }\n}\n\n\nvoid subprocess::Subprocess::setEnviron(char **env)\n{\n    m_environ = env;\n}\n\nvoid subprocess::Subprocess::setCallbackAsChild\n(const std::function<void ()> &callbackAsChild)\n{\n    m_callbackAsChild = callbackAsChild;\n}\n\n\n/// In case of callDetached the grandchild-PID is returned (\n/// but only if we waited for setup, else the pid is invalid!).\npid_t subprocess::Subprocess::lastPid() const\n{\n    if(m_lastCallWasDetached && ! m_lastCallWaitedForSetup){\n        throw QExcProgramming(\"m_lastCallWasDetached && ! m_lastCallWaitedForSetup\");\n    }\n\n    return m_lastPid;\n}\n"
  },
  {
    "path": "src/common/subprocess.h",
    "content": "#pragma once\n\n#include <unordered_set>\n\n#include \"os.h\"\n\n\n\nnamespace subprocess {\n\ntypedef std::vector<std::string> Args_t;\n\n/// Call external programs via fork and exec\n/// and wait for it to finish later\nclass Subprocess {\npublic:\n\n    Subprocess();\n\n    void call(char *const argv[],\n              bool forwardStdin=true,\n              bool forwardStdout=true,\n              bool forwardStderr=true);\n\n    void call(const Args_t &args, bool forwardStdin=true,\n                                  bool forwardStdout=true,\n                                  bool forwardStderr=true);\n\n    void call(const char *filename, char * const argv[],\n              bool forwardStdin=true,\n              bool forwardStdout=true,\n              bool forwardStderr=true);\n\n    void callDetached(char *const argv[], bool forwardStdin=false,\n                      bool forwardStdout=true,\n                      bool forwardStderr=true);\n\n    void callDetached(const char *filename, char *const argv[], bool forwardStdin=false,\n                      bool forwardStdout=true,\n                      bool forwardStderr=true);\n\n    void callDetached(const Args_t &args, bool forwardStdin=false,\n                      bool forwardStdout=true,\n                      bool forwardStderr=true);\n\n    int waitFinish();\n\n    void setAsRealUser(bool val);\n    void setForwardFdsOnExec(const std::unordered_set<int>& forwardFds);\n    void setForwardAllFds(bool val);\n    void setInNewSid(bool val);\n    void setWaitForSetup(bool waitForSetup);\n\n    pid_t lastPid() const;\n    void setEnviron(char **env);\n\n    void setCallbackAsChild(const std::function<void ()> &callbackAsChild);\n\n\nprivate:\n    void closeAllButForwardFds(os::Pipes_t &startPipe);\n    [[noreturn]]\n    void handleChild(const char *filename, char * const argv[], os::Pipes_t & startPipe,\n                     bool forwardStdin, bool forwardStdout, bool forwardStderr);\n    void 
doCall(const char *filename, char * const argv[],\n                    bool forwardStdin,\n                    bool forwardStdout,\n                    bool forwardStderr,\n                    bool detached);\n    void doCallWaitForSetup(const char *filename, char * const argv[],\n                    bool forwardStdin,\n                    bool forwardStdout,\n                    bool forwardStderr,\n                    bool detached);\n    void doFork(const char *filename, char * const argv[],\n                    bool forwardStdin,\n                    bool forwardStdout,\n                    bool forwardStderr,\n                    bool detached,\n                    os::Pipes_t &startPipe);\n\n\n    pid_t m_lastPid;\n    bool m_asRealUser;\n    std::unordered_set<int> m_forwardFds;\n    bool m_forwardAllFds;\n    bool m_lastCallWasDetached;\n    char** m_environ;\n    bool m_inNewSid;\n    bool m_waitForSetup;\n    bool m_lastCallWaitedForSetup;\n    std::function< void()> m_callbackAsChild;\n};\n\n\n} // namespace subprocess\n"
  },
  {
    "path": "src/common/user_kernerl.h",
    "content": "/* Common helpers which can be used\n * from within user- and linux-kernel-\n * space\n */\n\n#pragma once\n\n\n\n#ifdef __KERNEL__\n\n#ifdef DEBUG\n#include <linux/bug.h>\n// #define kuassert WARN_ON\n#define kuassert(condition) WARN_ON(!(condition))\n\n#else\n#define kuassert(condition)\n#endif\n\n#else\n\n#include <assert.h>\n#define kuassert assert\n\n#ifndef likely\n#ifdef __GNUC__\n#define likely(x)       __builtin_expect(!!(x), 1)\n#define unlikely(x)     __builtin_expect(!!(x), 0)\n#else\n#define likely(x)       (x)\n#define unlikely(x)     (x)\n#endif\n#endif\n\n#endif\n"
  },
  {
    "path": "src/common/util/CMakeLists.txt",
    "content": "\n\nadd_library(lib_util\n    cleanupresource.h\n    compat.h\n    compareoperator.cpp\n    conversions.cpp\n    cpp_exit.cpp\n    exccommon.cpp\n    qoutstream.cpp\n    qformattedstream.cpp\n    strlight.cpp\n    strlight_util.cpp\n    staticinitializer.h\n    sys_ioprio.h\n    translation.cpp\n    util.cpp\n    util_performance.cpp\n    )\n\n\ntarget_link_libraries(lib_util PUBLIC\n    Qt5::Core\n    uuid\n    )\n"
  },
  {
    "path": "src/common/util/cleanupresource.h",
    "content": "#pragma once\n\n#include <iostream>\n\n#include \"util.h\"\n\nnamespace private_namesapce {\n\n\ntemplate <typename F>\nstruct CleanupResource\n{\n    CleanupResource(F f, bool enable) :\n        m_cleanF{f},\n        m_enabled(enable)\n    {}\n\n    ~CleanupResource() {\n        // Do not throw from destructor\n        try {\n            if(m_enabled){\n                m_cleanF();\n            }\n        } catch (const std::exception& ex ) {\n            std::cerr << ex.what() << \"\\n\";\n        }\n    }\n\n    void setEnabled(bool val){\n        m_enabled = val;\n    }\n\npublic:\n    Q_DISABLE_COPY(CleanupResource)\n    DEFAULT_MOVE(CleanupResource)\n\nprivate:\n    F m_cleanF;\n    bool m_enabled;\n};\n\n\n} // namespace private_namesapce\n\n\ntemplate <typename F>\nprivate_namesapce::CleanupResource<F> finally(F f, bool enable=true) __attribute__ ((warn_unused_result));\n\n\n/// Perform a final action before leaving the block:\n/// Usage:\n/// char* buf = new char;\n/// auto deleter = finally([buf] {delete buf; });\ntemplate <typename F>\nprivate_namesapce::CleanupResource<F> finally(F f, bool enable){\n    return private_namesapce::CleanupResource<F>(f, enable);\n}\n"
  },
  {
    "path": "src/common/util/compareoperator.cpp",
    "content": "#include <QHash>\n#include <cassert>\n\n#include \"compareoperator.h\"\n#include \"exccommon.h\"\n\nnamespace  {\n\nconst QHash<QString, E_CompareOperator>& termEnumHash(){\n    static const QHash<QString, E_CompareOperator> termEnumHash = {\n        {\"-gt\", E_CompareOperator::GT},\n        {\"-ge\", E_CompareOperator::GE},\n        {\"-lt\", E_CompareOperator::LT},\n        {\"-le\", E_CompareOperator::LE},\n        {\"-eq\", E_CompareOperator::EQ},\n        {\"-ne\", E_CompareOperator::NE},\n        {\"-like\", E_CompareOperator::LIKE},\n        {\"-between\", E_CompareOperator::BETWEEN}\n    };\n    return termEnumHash;\n}\n\n} // namespace\n\n\n\nCompareOperator::CompareOperator(E_CompareOperator op) : m_operator(op) {}\n\n/// Transform one of the commandline-passed operators into the enum and store it.\nbool CompareOperator::fromTerminal(const QString &val){    \n    if(val.isEmpty()){\n        return false;\n    }\n\n    assert(val.at(0) == '-');\n\n    const auto enumIt = termEnumHash().find(val);\n    if(enumIt == termEnumHash().constEnd()){\n        return false;\n    }\n    m_operator = enumIt.value();\n    return true;\n}\n\nQString CompareOperator::asSql() const\n{\n    QString sqlOperator;\n    switch (m_operator) {\n    case E_CompareOperator::GT: sqlOperator = \">\"; break;\n    case E_CompareOperator::GE: sqlOperator = \">=\"; break;\n    case E_CompareOperator::LT: sqlOperator = \"<\"; break;\n    case E_CompareOperator::LE: sqlOperator = \"<=\"; break;\n    case E_CompareOperator::EQ: sqlOperator = \"=\"; break;\n    case E_CompareOperator::NE: sqlOperator = \"!=\"; break;\n    case E_CompareOperator::LIKE: sqlOperator = \" LIKE \"; break;\n    case E_CompareOperator::BETWEEN: sqlOperator = \" BETWEEN \"; break;\n    case E_CompareOperator::ENUM_END: throw QExcProgramming(\"E_CompareOperator::ENUM_END\");\n    }\n    return sqlOperator;\n}\n\nQString CompareOperator::asTerminal() const\n{\n    QString sqlOperator;\n    switch 
(m_operator) {\n    case E_CompareOperator::GT: sqlOperator = \"-gt\"; break;\n    case E_CompareOperator::GE: sqlOperator = \"-ge\"; break;\n    case E_CompareOperator::LT: sqlOperator = \"-lt\"; break;\n    case E_CompareOperator::LE: sqlOperator = \"-le\"; break;\n    case E_CompareOperator::EQ: sqlOperator = \"-eq\"; break;\n    case E_CompareOperator::NE: sqlOperator = \"-ne\"; break;\n    case E_CompareOperator::LIKE: sqlOperator = \"-like\"; break;\n    case E_CompareOperator::BETWEEN: sqlOperator = \"-between\"; break;\n    case E_CompareOperator::ENUM_END: throw QExcProgramming(\"E_CompareOperator::ENUM_END\");\n    }\n    return sqlOperator;\n}\n\nE_CompareOperator CompareOperator::asEnum() const\n{\n    return m_operator;\n}\n\n\n\n\n"
  },
  {
    "path": "src/common/util/compareoperator.h",
    "content": "#pragma once\n\n#include <QString>\n#include <QObject>\n#include <QVector>\n\n\nenum class E_CompareOperator { GT,GE,LT,LE,EQ,NE,LIKE,BETWEEN,ENUM_END };\n\n\n/// The most important sql-operators which are used\n/// in this program as user-input (LE for less or equal),\n/// enum and sql operator (<=)\nclass CompareOperator\n{\npublic:\n    CompareOperator() = default;\n    CompareOperator(E_CompareOperator op);\n\n    bool fromTerminal(const QString& val);\n\n    QString asSql() const;\n    QString asTerminal() const;\n    E_CompareOperator asEnum() const;\n\nprivate:\n    E_CompareOperator m_operator{E_CompareOperator::EQ};\n};\n\n"
  },
  {
    "path": "src/common/util/compat.h",
    "content": "#pragma once\n\n#include <QtGlobal>\n#include <QString>\n#include <QResource>\n#include <QTextStream>\n#include <QDateTime>\n\n#if QT_VERSION < QT_VERSION_CHECK(5, 14, 0)\nnamespace Qt\n{\n    using SplitBehavior = QString::SplitBehavior;\n    const SplitBehavior SkipEmptyParts = SplitBehavior::SkipEmptyParts;\n    const auto endl = ::endl;\n\n    inline QDateTime datetimeFromDate(const QDate& date){\n        return QDateTime(date);\n    }\n}\n#else\nnamespace Qt\n{\n    inline QDateTime datetimeFromDate(const QDate& date){\n        return date.startOfDay();\n    }\n}\n#endif\n\n\n#if QT_VERSION < QT_VERSION_CHECK(5, 13, 0)\nnamespace Qt\n{\n    inline bool resourceIsCompressed(QResource &r){\n        return r.isCompressed();\n    }\n}\n#else\nnamespace Qt\n{\n    inline bool resourceIsCompressed(QResource &r){\n        return r.compressionAlgorithm() != QResource::NoCompression;\n    }\n}\n#endif\n\n"
  },
  {
    "path": "src/common/util/conversions.cpp",
    "content": "#include <cassert>\n#include <unordered_set>\n\n#include <QStringList>\n#include <QRegularExpression>\n\n#include \"conversions.h\"\n#include \"util.h\"\n\n\nstatic QHash<QString, char> validTimeUnitHash(){\n    static const QHash<QString, char> units {\n        {qtr(\"y\"), 'y'}, // Year\n        {qtr(\"m\"), 'm'},   // month\n        {qtr(\"d\"), 'd'},   // day\n        {qtr(\"h\"), 'h'},   // hour\n        {qtr(\"min\"), 'M'},   // minute\n        {qtr(\"s\"), 's'},   // second\n    };\n    return units;\n}\n\n\nExcConversion::ExcConversion(const QString  & text) :\n    QExcCommon(text, false)\n{}\n\n\n\n\n/// @returns by comma separated list of valid relative time units with description\n/// (y: year, m: month, ...).\nconst QString &Conversions::relativeDateTimeUnitDescriptions()\n{\n    static const auto s = qtr(\"y: year, m: month, d: day, h: hour, min: minute, s: second\");\n    return s;\n}\n\n\n/// Transform user supplied byte-sizes (\"3KiB\", \"2 MiB \", etc.) to int.\n/// @throws ExcConversion\nqint64 Conversions::bytesFromHuman(QString str)\n{\n    str = str.simplified();\n    str.replace( \" \", \"\" );\n\n    const QString errPreamble(qtr(\"Failed to convert bytesize '%1' - \").arg(str));\n\n    if(str.isEmpty()){\n        throw ExcConversion(errPreamble + qtr(\"it is empty.\"));\n    }\n    if(str[str.size() - 1].isDigit()){\n        // assuming bytes size\n        qint64 bytes;\n        if(! 
qVariantTo(str, &bytes)){\n            throw ExcConversion(errPreamble + qtr(\"it appears to be not an integer \"\n                                                         \"although no unit was given.\"));\n        }\n        return bytes;\n    }\n\n    int unitIdx = str.size() - 2;\n    for(; unitIdx >= 0; unitIdx--){\n        if(str[unitIdx].isDigit()){\n            unitIdx++;\n            break;\n        }\n    }\n    if(unitIdx == -1){\n        throw ExcConversion(errPreamble + qtr(\"no digit was given\"));\n    }\n\n    const QString unit = str.mid(unitIdx);\n    const QString val = str.left(unitIdx);\n\n    double bytesFloat;\n    if(! qVariantTo(val, &bytesFloat)){\n        throw ExcConversion(errPreamble + qtr(\"conversion from string '%1' to float failed\").arg(val));\n    }\n    if(bytesFloat < 0){\n        bytesFloat += -1;\n    }\n\n    static const std::unordered_set<QString> validUnitSet {\n        \"k\", \"kb\", \"kib\", \"m\", \"mb\", \"mib\", \"g\", \"gb\", \"gib\", \"t\", \"tb\", \"tib\"\n    };\n    if(validUnitSet.find(unit.toLower()) == validUnitSet.end()){\n        const QString validUnits(qtr(\"valid units include 'no unit', \"\n                                     \"K (Kib), M (MiB), G (GiB) and T (TiB) \"\n                                     \"but '%1' was given\").arg(unit));\n        throw ExcConversion(errPreamble + validUnits);\n    }\n\n    switch (unit[0].toLower().toLatin1()) {\n    case 'k': bytesFloat *= 1024.0; break;\n    case 'm': bytesFloat *= 1024.0*1024; break;\n    case 'g': bytesFloat *= 1024.0*1024*1024; break;\n    case 't': bytesFloat *= 1024.0*1024*1024*1024; break;\n    default: assert(false);\n    }\n    return static_cast<qint64>(bytesFloat);\n}\n\n\n/// size to human readbale string (Kib, Mib, etc....)\nQString Conversions::bytesToHuman(const qint64 bytes)\n{\n    float s = bytes;\n\n    static const QStringList list({\"KiB\", \"MiB\", \"GiB\", \"TiB\"});\n    QStringListIterator i(list);\n    QString 
unit(\"bytes\");\n    if(s <= 1024.0f){\n        return QString().setNum(s,'f',0)+\" \"+unit;\n    }\n\n    do {\n        unit = i.next();\n        s /= 1024.0f;\n    } while(s >= 1024.0f && i.hasNext());\n\n    return QString().setNum(s,'f',2)+\" \"+unit;\n}\n\n/// @param subtractIt: if true, the parsed date is subtracted from current one,\n/// else it is added.\n/// @throws ExcConversion\nQDateTime Conversions::relativeDateTimeFromHuman(const QString &str, bool subtractIt)\n{\n    static const QRegularExpression re(R\"((\\d+)(.+))\");\n    QRegularExpressionMatch match = re.match(str);\n    const QString errPreamble(qtr(\"Failed to convert relative date(time) '%1' - \").arg(str));\n    if (! match.hasMatch()) {\n        throw ExcConversion(errPreamble + qtr(\"It must be a digit followed by a timespec.\"));\n    }\n\n    // must always succeed, otherwise regex would be broken\n    int number = match.captured(1).toInt();\n    const QString parsedTimeSpec = match.captured(2).trimmed();\n    const auto & validUnits = validTimeUnitHash();\n\n    auto matchedUnitIt = validUnits.find(parsedTimeSpec);\n\n    if(matchedUnitIt == validUnits.end()){\n        // don't use auto here: older version of qt do not support QList<QString>::join...\n        QStringList units = validUnits.keys();\n        throw ExcConversion(errPreamble + qtr(\"%1 is not a valid timespec. 
Those are %2\")\n                                                    .arg(parsedTimeSpec, units.join(\",\")));\n    }\n\n    if(subtractIt){\n        // go back in time:\n        number = -number;\n    }\n\n    auto now = QDateTime::currentDateTime();\n    switch (matchedUnitIt.value()) {\n    case 'y': return now.addYears(number);\n    case 'm': return now.addMonths(number);\n    case 'd': return now.addDays(number);\n    case 'h': return now.addSecs(number*3600);\n    case 'M': return now.addSecs(number*60);\n    case 's': return now.addSecs(number);\n    default:\n        assert(false);\n    }\n    return {};\n}\n\nconst QString &Conversions::dateIsoFormatWithMilliseconds()\n{\n    static const QString f{\"yyyy-MM-ddTHH:mm:ss.zzz\"};\n    return f;\n}\n\n\n\n\n"
  },
  {
    "path": "src/common/util/conversions.h",
    "content": "#pragma once\n\n#include <QString>\n#include <QDateTime>\n\n#include \"exccommon.h\"\n\nclass ExcConversion : public QExcCommon\n{\npublic:\n     ExcConversion(const QString &text);\n};\n\n\n/// Parse datatypes from human input, display them human readable\nclass Conversions\n{\npublic:\n    static const QString& relativeDateTimeUnitDescriptions();\n\n    qint64 bytesFromHuman(QString str);\n    QString bytesToHuman(qint64 bytes);\n\n    QDateTime relativeDateTimeFromHuman(const QString& str, bool subtractIt);\n\n    static const QString& dateIsoFormatWithMilliseconds();\n};\n\n"
  },
  {
    "path": "src/common/util/cpp_exit.cpp",
    "content": "#include \"cpp_exit.h\"\n\n\n/// To allow for destructor calling of local objects, throw instead of\n/// calling exit\nvoid cpp_exit(int ret)\n{\n    throw ExcCppExit(ret);\n}\n"
  },
  {
    "path": "src/common/util/cpp_exit.h",
    "content": "#pragma once\n\n\nclass ExcCppExit\n{\npublic:\n    ExcCppExit(int ret) : m_ret(ret){}\n\n    int ret() const { return m_ret; }\nprivate:\n    int m_ret;\n\n};\n\n[[noreturn]]\nvoid cpp_exit(int ret);\n\n"
  },
  {
    "path": "src/common/util/exccommon.cpp",
    "content": "\n#include <utility>\n#include <QDebug>\n\n#include \"exccommon.h\"\n#include \"util.h\"\n#include \"translation.h\"\n\n\n\nExcCommon::ExcCommon(std::string text) :\n    m_descrip(std::move(text))\n{}\n\nconst char *ExcCommon::what() const noexcept\n{\n    return m_descrip.c_str();\n}\n\nstd::string &ExcCommon::descrip()\n{\n    return m_descrip;\n}\n\n\n\n\nQExcCommon::QExcCommon(QString text, bool collectStacktrace) :\n    m_descrip(std::move(text))\n{\n    if(collectStacktrace){\n       appendStacktraceToDescrip();\n    }\n\n}\n\nconst char *QExcCommon::what() const noexcept\n{\n    m_local8Bit = m_descrip.toLocal8Bit();\n    return m_local8Bit.constData();\n}\n\nQString QExcCommon::descrip() const\n{\n    return m_descrip;\n}\n\nvoid QExcCommon::setDescrip(const QString &descrip)\n{\n    m_descrip = descrip;\n    m_local8Bit = descrip.toLocal8Bit();\n}\n\nvoid QExcCommon::appendStacktraceToDescrip()\n{\n    const auto st = generate_trace_string();\n    m_descrip += \"\\n\" + QString::fromStdString(st);\n}\n\n\n\nQExcIllegalArgument::QExcIllegalArgument(const QString &text) :\n    QExcCommon (text)\n{\n\n}\n\nQExcProgramming::QExcProgramming(const QString &text) :\n    QExcCommon (text)\n{\n\n}\n\nQExcIo::QExcIo(QString text, bool collectStacktrace) :\n    QExcCommon(\"\", false)\n{\n    m_errorNumber = errno;\n    if(errno != 0){\n        text += \" (\" + QString::number(errno) +\n                \"): \" + translation::strerror_l(errno);\n    }\n\n    this->setDescrip(text);\n    if(collectStacktrace){\n        appendStacktraceToDescrip();\n    }\n}\n\nint QExcIo::errorNumber() const\n{\n    return m_errorNumber;\n}\n"
  },
  {
    "path": "src/common/util/exccommon.h",
    "content": "#pragma once\n\n#include <exception>\n#include <string>\n#include <QString>\n\nclass ExcCommon : public std::exception\n{\npublic:\n    explicit ExcCommon(std::string  text);\n\n    const char *what () const noexcept override;\n    std::string & descrip();\n\nprotected:\n    std::string m_descrip;\n};\n\n\n\nclass QExcCommon : public std::exception\n{\npublic:\n    explicit QExcCommon(QString  text, bool collectStacktrace=true);\n\n    const char *what () const noexcept override;\n    QString descrip() const;\n    void setDescrip(const QString &descrip);\n\nprotected:\n    void appendStacktraceToDescrip();\n\n    QString m_descrip;\n\nprivate:\n    mutable QByteArray m_local8Bit;\n};\n\n\nclass QExcIllegalArgument : public QExcCommon\n{\npublic:\n    QExcIllegalArgument(const QString & text);\n};\n\n/// Thrown in case of a detected bug^^\nclass QExcProgramming : public QExcCommon\n{\npublic:\n    QExcProgramming(const QString & text);\n};\n\n\nclass QExcIo : public QExcCommon\n{\npublic:\n    explicit QExcIo(QString  text, bool collectStacktrace=true);\n    int errorNumber() const;\nprivate:\n    int m_errorNumber;\n};\n"
  },
  {
    "path": "src/common/util/nullable_value.h",
    "content": "#pragma once\n\n#include <QMetaType>\n#include \"exccommon.h\"\n\n\nclass QExcNullDeref : public QExcCommon\n{\npublic:\n    using QExcCommon::QExcCommon;\n};\n\n\ntemplate <class T>\nclass NullableValue {\npublic:\n\n    typedef T value_type;\n\n    NullableValue() : m_isNull(true){}\n    NullableValue(const T& t) : m_isNull(false), m_value(t){}\n\n    const T& value() const {\n        if(m_isNull){\n            throw QExcNullDeref(\"Tried to obtain value while it is set to null\");\n        }\n        return m_value;\n    }\n\n    void setValue(const T& val){\n        m_value = val;\n        m_isNull = false;\n    }\n\n    bool isNull() const {\n        return m_isNull;\n    }\n\n    void setNull(){\n        m_isNull = true;\n    }\n\n    bool operator==(const NullableValue& rhs) const\n    {\n        if(isNull()){\n            return rhs.isNull();\n        }\n        // we are not null\n        if( rhs.isNull()){\n            return false;\n        }\n        // other is also not null.\n        // compare vals\n        return value() == rhs.value();\n    }\n\n    bool operator!=(const NullableValue& rhs) const\n    {\n        return!(operator==(rhs));\n    }\n\n    NullableValue& operator=(const T& val)\n    {\n        setValue(val);\n        return *this;\n    }\n\nprotected:\n    bool m_isNull;\n    T m_value;\n};\n\n\ntemplate <class T>\nbool operator==(const T& lhs, const NullableValue<T>& rhs) {\n    // comparing to value, so it cannot be null\n    if(rhs.isNull()){\n        return false;\n    }\n    return lhs == rhs.value();\n}\n\n\ntemplate <class T>\nbool operator==(const NullableValue<T>& lhs, const T& rhs) {\n    return rhs == lhs.value();\n}\n\ntypedef NullableValue<uint64_t> HashValue;\n\nQ_DECLARE_METATYPE(HashValue)\n"
  },
  {
    "path": "src/common/util/qformattedstream.cpp",
    "content": "#include \"qformattedstream.h\"\n\n#include \"util.h\"\n\nQFormattedStream::QFormattedStream(QString *string, QIODevice::OpenMode openMode) :\n    m_textStream(string, openMode)\n{\n    this->initCommon();\n}\n\nQFormattedStream::QFormattedStream(FILE *fileHandle, QIODevice::OpenMode openMode) :\n    m_textStream(fileHandle, openMode)\n{\n    this->initCommon();\n}\n\nQFormattedStream::QFormattedStream(QIODevice *device) :\n    m_textStream(device)\n{\n    this->initCommon();\n}\n\nQFormattedStream::QFormattedStream(QByteArray *array, QIODevice::OpenMode openMode) :\n    m_textStream(array, openMode)\n{\n    this->initCommon();\n}\n\nQFormattedStream::QFormattedStream(const QByteArray &array, QIODevice::OpenMode openMode) :\n    m_textStream(array, openMode)\n{\n    this->initCommon();\n}\n\n\nvoid QFormattedStream::initCommon()\n{\n    m_colNChars = 0;\n    m_maxLineWidth = std::numeric_limits<int>::max();\n    m_autoSepStreamChunks = true;\n    m_streamChunkSep = ' ';\n}\n\n\nQFormattedStream &QFormattedStream::operator<<(const QString &str)\n{\n    return (*this)<<(QStringRef(&str));\n}\n\n\nQFormattedStream &QFormattedStream::operator<<(const QStringRef &str) {\n    int wordStartIdx = -1;\n    for(int i=0; i < str.size(); i++){\n        const QChar& c = str.at(i);\n        if(m_colNChars == 0){\n           writeLineStart();\n        }\n        if(c.isSpace()){\n            if(wordStartIdx != -1){\n                QStringRef word = str.mid(wordStartIdx, i - wordStartIdx);\n                handleWordEnd(word);\n                wordStartIdx = -1;\n            }\n            writeSpace(c);\n        } else {\n            if(wordStartIdx == -1){\n                wordStartIdx = i;\n            }\n        }\n    }\n    // Final word might not be written yet.\n    // Note that word-breaks spreading over multiple strings (multiples calls of operator<<)\n    // are not correctly handled, if autoSepWords is false.\n    if(wordStartIdx != -1){\n        
QStringRef word = str.mid(wordStartIdx);\n        handleWordEnd(word);\n    }\n    // if at beginning of line, words are already separated,\n    // so don't write space in that case\n    if(m_autoSepStreamChunks && m_colNChars != 0){\n        writeSpace(m_streamChunkSep);\n    }\n    return *this;\n}\n\n\n/// Each line in the stream will start with the given string (also applies\n/// to the first line)\nvoid QFormattedStream::setLineStart(const QString &lineStart)\n{\n    m_lineStart = lineStart;\n}\n\n/// Latest after that many characters a word-conscious line-break is\n/// performed. If a word is longer than maxLineWidth, it will be splittet,\n/// so it fits into the minimum possible number of lines.\nvoid QFormattedStream::setMaxLineWidth(int maxLineWidth)\n{\n    m_maxLineWidth = maxLineWidth;\n}\n\n\nvoid QFormattedStream::writeLineStart()\n{\n    m_textStream << m_lineStart;\n    m_colNChars = m_lineStart.size();\n}\n\nvoid QFormattedStream::handleWordEnd(const QStringRef &word)\n{\n    // Check if it fits in current line.\n    if(m_colNChars + word.size() > m_maxLineWidth){\n        if(word.size() + m_lineStart.size() <= m_maxLineWidth){\n            // write it to next line\n            m_textStream << \"\\n\";\n            writeLineStart();\n            m_textStream << word;\n            m_colNChars += word.size();\n        } else {\n            writeLongWord(word);\n        }\n    } else {\n        m_textStream << word;\n        m_colNChars += word.size();\n    }\n}\n\n/// If a word is too large to fit into one line,\n/// print as much into each line as possible.\nvoid QFormattedStream::writeLongWord(const QStringRef &word)\n{\n    // dont use stl-style iterator for compatability with qt-version < 5.4\n    for(int i=0; i < word.size(); i++){\n        const QChar c = word.at(i);\n        if(m_colNChars >= m_maxLineWidth){\n            m_textStream << \"\\n\";\n            writeLineStart();\n        }\n        m_textStream << c;\n        m_colNChars++;\n    
}\n}\n\n/// If we are at end of desired width,\n/// always write line feed.\nvoid QFormattedStream::writeSpace(const QChar &c)\n{\n    if(c == QChar::LineFeed || m_colNChars >= m_maxLineWidth){\n        m_textStream << \"\\n\";\n        m_colNChars = 0;\n    } else {\n        m_textStream << c;\n        m_colNChars++;\n    }\n}\n\n\nconst QString &QFormattedStream::lineStart() const\n{\n    return m_lineStart;\n}\n\nint QFormattedStream::maxLineWidth() const\n{\n    return m_maxLineWidth;\n}\n\nQChar QFormattedStream::streamChunkSep() const\n{\n    return m_streamChunkSep;\n}\n\nvoid QFormattedStream::setStreamChunkSep(const QChar &streamChunkSep)\n{\n    m_streamChunkSep = streamChunkSep;\n}\n\n\n"
  },
  {
    "path": "src/common/util/qformattedstream.h",
    "content": "#pragma once\n\n#include <QTextStream>\n\n\n/// Write strings to text-streams with a custom formatting.\n/// Each line can be set to start with an arbitrary string.\n/// If maxLineWidth is set, split a string word-aware once\n/// a line becomes too long.\n/// Strings received during multiple <<-operator-calls are\n/// automatically separated by whitespace (or the desired char), if not already separated\n/// by a character for which QChar::isSpace() returns true.\n/// Note: Avoid using the tab-character as its width is controlled by the terminal.\nclass QFormattedStream\n{\npublic:\n    QFormattedStream(QString *string, QIODevice::OpenMode openMode = QIODevice::ReadWrite);\n    QFormattedStream(FILE *fileHandle, QIODevice::OpenMode openMode = QIODevice::ReadWrite);\n    QFormattedStream(QIODevice *device);\n    QFormattedStream(QByteArray *array, QIODevice::OpenMode openMode = QIODevice::ReadWrite);\n    QFormattedStream(const QByteArray &array, QIODevice::OpenMode openMode = QIODevice::ReadOnly);\n\n    QFormattedStream& operator<<(const QString& str);\n    QFormattedStream& operator<<(const QStringRef& str);\n\n    void setLineStart(const QString &lineStart);\n    void setMaxLineWidth(int maxLineWidth);\n    void setStreamChunkSep(const QChar &streamChunkSep);\n\n    const QString& lineStart() const;\n    int maxLineWidth() const;\n    QChar streamChunkSep() const;\n\n\nprivate:\n    void initCommon();\n    void writeLineStart();\n    void handleWordEnd(const QStringRef &word);\n    void writeLongWord(const QStringRef &word);\n    void writeSpace(const QChar& c);\n\n    QTextStream m_textStream;\n    QString m_lineStart;\n    int m_colNChars; // number of written characters in current line\n    int m_maxLineWidth;\n    bool m_autoSepStreamChunks;\n    QChar m_streamChunkSep;\n};\n"
  },
  {
    "path": "src/common/util/qoutstream.cpp",
    "content": "\n#include <cassert>\n\n#include \"qoutstream.h\"\n#include \"compat.h\"\n\nQOut::QOut() :\n    m_textStream(stdout)\n{\n\n}\n\nQOut::~QOut()\n{\n    m_textStream.flush();\n}\n\n\nQErr::QErr() :\n    m_textStream(stderr)\n{\n\n}\n\nQErr::~QErr()\n{\n    m_textStream.flush();\n}\n\n\n\nstd::function<QString()> QIErr::s_preambleCallback = []() { return \"\"; };\n\n\nQIErr::QIErr() :\n    m_ts(stderr)\n{\n    if(s_preambleCallback){\n        m_ts << s_preambleCallback();\n    }\n}\n\nQIErr::~QIErr()\n{\n    m_ts << Qt::endl;\n}\n\nvoid QIErr::setPreambleCallback(const std::function<QString ()> &f){\n    s_preambleCallback = f;\n}\n\n\n\n\n\n\n\n\n"
  },
  {
    "path": "src/common/util/qoutstream.h",
    "content": "#pragma once\n\n#include <QTextStream>\n#include <functional>\n\n\n/// Print QString's and other compatible types\n/// to stdout (flush on destructor).\nclass QOut\n{\npublic:\n    QOut();\n    ~QOut();\n\n    /// See QTextStream::operator<<(const char*) - ISO-8859-1 encoding is assumed.\n    /// We use UTF-8 everywhere.\n    QOut& operator<<(const char* str){\n        return *this << QString::fromUtf8(str);\n    }\n\n    template<class T>\n    QOut& operator<<(const T& t) {\n        m_textStream << t;\n        return *this;\n    }\nprivate:\n    QTextStream m_textStream;\n};\n\n\n/// Print QString's and other compatible types\n/// to stderr (flush on destructor).\nclass QErr\n{\npublic:\n    QErr();\n    ~QErr();\n\n    QErr& operator<<(const char* str){\n        return *this << QString::fromUtf8(str);\n    }\n\n    template<class T>\n    QErr& operator<<(const T& t) {\n        m_textStream << t;\n        return *this;\n    }\nprivate:\n    QTextStream m_textStream;\n};\n\n\n\n/// Informative QErr.\n/// Wrap stderr and add a custom preamble before every stream start.\n/// Print a custom message in the constructor.\n/// Auto-separate words by whitespace.\n/// In destructor, add 'newline '\\n' and flush (endl)\n/// Use it like:\n/// QICerr::setPreambleCallback([]() { return QCoreApplication::applicationName() + \": \"; });\n/// QICerr() << \"Foo\" << \"bar\";\nclass QIErr\n{\npublic:\n    QIErr();\n    ~QIErr();\n\n    QIErr& operator<<(const char* str){\n        return *this << QString::fromUtf8(str);\n    }\n\n    template<class T>\n    QIErr& operator<<(const T& t) {\n        if(m_WrittenTo){\n            // auto whitespace\n            m_ts << ' ';\n        } else {\n            m_WrittenTo = true;\n        }\n        m_ts << t;\n        return *this;\n    }\n\n    static void setPreambleCallback(const std::function<QString()>& f);\nprivate:\n    bool m_WrittenTo {false};\n    QTextStream m_ts;\n    static std::function<QString()> 
s_preambleCallback;\n\n};\n\n\n\n"
  },
  {
    "path": "src/common/util/staticinitializer.h",
    "content": "#pragma once\n\n#include \"util.h\"\n\n/// Call an arbitrary function in the constructor, which can\n/// be used for static initialization\nclass StaticInitializer\n{\npublic:\n    template<typename Lambda>\n    StaticInitializer(Lambda f){\n        f();\n    }\n\npublic:\n    ~StaticInitializer() = default;\n\npublic:\n    Q_DISABLE_COPY(StaticInitializer)\n    DEFAULT_MOVE(StaticInitializer)\n};\n\n"
  },
  {
    "path": "src/common/util/strlight.cpp",
    "content": "\n#include <cassert>\n\n#include \"strlight.h\"\n\n\n/// Set interal buffer and size. Only allowed\n/// if *this was constructed via the default constructor.\nvoid StrLight::setRawData(const char *buf, StrLight::size_type n){\n    assert( ! m_weOwnBuf || m_buf == nullptr );\n    // we won't change buf, promised (;\n    char* b = const_cast<char*>(buf);\n    m_buf = b;\n    m_size = n;\n}\n\nvoid StrLight::setRawSize(StrLight::size_type n){\n    assert( ! m_weOwnBuf || m_buf == nullptr );\n    m_size = n;\n}\n\n/// Allocate count chars and initialize with ch\nStrLight::StrLight(StrLight::size_type count, char ch)\n{\n    allocatePlusX(count);\n    memset(m_buf, ch, count);\n    setSizeInternal(count);\n}\n\n/// Allocates memory, creates copy of nullterminated cstring.\n/// @param cstring *must not be null*.\nStrLight::StrLight(const char *cstring) :\n    StrLight(cstring, (cstring == nullptr) ? 0 : strlen(cstring))\n{}\n\n/// Allocates memory, creates copy of cstring\nStrLight::StrLight(const char *cstring, StrLight::size_type size)\n{\n    if(cstring == nullptr){\n        throw QExcProgramming(\"cstring == nullptr\");\n    }\n    allocatePlusX(size);\n    memcpy(m_buf, cstring, size);\n    setSizeInternal(size);\n}\n\nStrLight::~StrLight(){\n    if(m_weOwnBuf && m_buf != nullptr){\n        delete[] m_buf;\n    }\n}\n\n/// Warning: if buffer is non-null, copying is only allowed if\n/// buf is managed internally (owned by us). The rationale\n/// is e.g. to no accidantially store a StrLight with\n/// external buffer in a container. On the other hand\n/// it could be supported to copy the external buffer instead,\n/// however, a raw buffer is probably set for performance reasons\n/// so avoid implicit copying (see also deepCopy)\nStrLight::StrLight(const StrLight &other)\n{\n    if(other.m_buf == nullptr){\n        // Nothing to copy\n        return;\n    }\n    if(! 
other.m_weOwnBuf){\n        throw QExcProgramming(\"Copy constructor called for \"\n                              \"externally managed buffer\");\n    }\n    // copy\n    allocatePlusX(other.m_size);\n    memcpy(m_buf, other.constData(), other.m_size);\n    setSizeInternal(other.m_size);\n}\n\n/// Explicit function to also allow for\n/// easy copying of StrLight's with externally\n/// managed buffer.\nStrLight StrLight::deepCopy() const {\n    if(m_buf == nullptr){\n        return StrLight();\n    }\n    // copy the data: works for internal\n    // and external managed buffer.\n    // Do not call copy-constructor here!\n    StrLight str(m_buf, m_size);\n    return str;\n}\n\nStrLight &StrLight::operator=(StrLight other)\n{\n    swap(*this, other);\n    return *this;\n}\n\nStrLight &StrLight::operator=(char c)\n{\n    this->resize(1);\n    *m_buf = c;\n    return *this;\n}\n\nStrLight &StrLight::operator+=(const StrLight &rhs){\n    append(rhs.constData(), rhs.size());\n    return *this;\n}\n\nStrLight &StrLight::operator+=(const char rhs){\n    char buf[1];\n    buf[0] = rhs;\n    append(buf, 1);\n    return *this;\n}\n\nvoid StrLight::append(const char *str, StrLight::size_type n){\n    auto oldsize = m_size;\n    this->resize( m_size + n);\n    memcpy(&m_buf[oldsize], str, n);\n    assert(m_buf[m_size] == '\\0'); // should be done during resize\n}\n\nconst char& StrLight::operator[](StrLight::size_type idx) const{\n    assert(idx < m_size);\n    return m_buf[idx];\n}\n\n\n\n/// move constructor\nStrLight::StrLight(StrLight &&other)\n    : StrLight()\n{\n    swap(*this, other);\n}\n\nchar StrLight::back() const {\n    assert(m_size > 0);\n    return m_buf[m_size - 1];\n}\n\n/// @return true, if *this object owns\n/// the buffer. 
Else, the buffer was passed\n/// from outside.\nbool StrLight::bufIsManagedByThis() const {\n    return m_weOwnBuf;\n}\n\nStrLight::size_type StrLight::find(const char *s, StrLight::size_type pos) const\n{\n    assert(m_buf != nullptr);\n    assert(pos < m_size);\n    const char* haystackStart = &m_buf[pos];\n    const char* match = strstr(haystackStart, s);\n    if(match == nullptr) {\n        return StrLight::npos;\n    }\n    return match - m_buf;\n}\n\nStrLight::size_type StrLight::find(const StrLight &s, StrLight::size_type pos) const\n{\n    return StrLight::find(s.constData(), pos);\n}\n\n/// Find the last occurence of ch in str.\n/// @return the found index\nint StrLight::lastIndexOf(char ch) const {\n    for(ssize_type i=m_size - 1; i >= 0; i--){\n        assert(m_buf != nullptr);\n        if(m_buf[i] == ch){\n            return int(i);\n        }\n    }\n    return -1;\n}\n\n/// See QbyteArray::left\nStrLight StrLight::left(int len) const {\n    StrLight s(m_buf, std::min<size_type>(len, m_size));\n    return s;\n}\n\n/// See QbyteArray::mid\nStrLight StrLight::mid(int pos) const {\n    StrLight s(&m_buf[pos], m_size - pos);\n    return s;\n}\n\nbool StrLight::empty() const {\n    return m_size == 0;\n}\n\nStrLight::size_type StrLight::capacity() const {\n    return m_capacity;\n}\n\n/// Warning: only allowed, if we manage the non-null buffer ourselves.\n/// If it is null, create a new buffer.\nvoid StrLight::resize(StrLight::size_type n){\n    reserve(n);\n    setSizeInternal(n);\n}\n\nvoid StrLight::reserve(StrLight::size_type n)\n{\n    if(n < m_capacity){\n        // capacity nonzero -> buf cannot be null\n        assert(m_buf != nullptr);\n        assert(m_weOwnBuf);\n        return;\n    }\n    if(m_buf == nullptr){\n        // no external buffer is set: allocate our own\n        allocatePlusX(n);\n    } else {\n        assert(m_weOwnBuf);\n        realloc(n + 64);\n    }\n}\n\nconst char *StrLight::constData() const {\n    return m_buf;\n}\n\nchar 
*StrLight::data() {\n    return m_buf;\n}\n\nconst char *StrLight::c_str() const {\n    return constData();\n}\n\n/// Warning: only allowed, if not null and not empty.\n/// Returns pointer to the final char\nconst char *StrLight::constDataEnd() const\n{\n    assert(m_buf != nullptr);\n    assert(! this->empty());\n    return m_buf + m_size - 1;\n}\n\nStrLight::size_type StrLight::size() const {\n    return m_size;\n}\n\nvoid StrLight::pop_back(){\n    assert(m_size > 0);\n    this->resize(m_size - 1);\n}\n\n\n\n/////////// Private ///////////\n\n\nvoid StrLight::realloc(StrLight::size_type newCapacity)\n{\n    assert(m_buf != nullptr);\n    assert(m_weOwnBuf);\n    char* newArr = new char[newCapacity];\n    memcpy(newArr, m_buf, m_size);\n    delete[] m_buf;\n    m_buf = newArr;\n    m_capacity = newCapacity;\n    m_weOwnBuf = true;\n}\n\n/// allocate a little more than needed\nvoid StrLight::allocatePlusX(StrLight::size_type approxNewCapacity)\n{\n    assert(m_buf == nullptr);\n    // Do not change - other functions rely on this\n    approxNewCapacity += 256;\n    m_buf = new char[approxNewCapacity];\n    m_capacity = approxNewCapacity;\n    m_weOwnBuf = true;\n}\n\n/// setting buffersize only allowed if we manage it\nvoid StrLight::setSizeInternal(StrLight::size_type n)\n{\n    assert(m_buf != nullptr);\n    assert(m_weOwnBuf);\n    m_size = n;\n    // otherwise the following would be illegal:\n    assert(m_capacity > m_size);\n    m_buf[n] = '\\0';\n}\n\nvoid swap(StrLight &first, StrLight &second)\n{\n    using std::swap;\n\n    swap(first.m_weOwnBuf, second.m_weOwnBuf);\n    swap(first.m_buf, second.m_buf);\n    swap(first.m_size, second.m_size);\n    swap(first.m_capacity, second.m_capacity);\n}\n\n\n/////////// General ///////////\n\nuint qHash(const StrLight &key, uint seed)\n{\n    if (key.size() == 0)\n        return seed;\n    else\n        return qHashBits(key.constData(), key.size(), seed);\n}\n\nbool operator==(const StrLight &lhs, const StrLight 
&rhs) {\n    if(lhs.size() != rhs.size()){\n        return false;\n    }\n    if(lhs.size() == 0){\n        // both empty\n        return true;\n    }\n    return memcmp(lhs.constData(), rhs.constData(), lhs.size()) == 0;\n}\n\nbool operator==(const StrLight &lhs, const char &c)\n{\n    return lhs.size() == 1 && *lhs.constData() == c;\n}\n\nbool operator!=(const StrLight &lhs, const char &c)\n{\n    return !(lhs == c);\n}\n\n\nQDebug operator<<(QDebug debug, const StrLight &c)\n{\n    QDebugStateSaver saver(debug);\n    debug.nospace() << QByteArray::fromRawData(c.constData(), int(c.size()));\n    return debug;\n}\n\n\n\nconst StrLight operator+(const StrLight &s1, const StrLight &s2)\n{\n    StrLight res;\n    res.reserve(s1.size() + s2.size());\n    res.append(s1.constData(), s1.size());\n    res.append(s2.constData(), s2.size());\n    return res;\n}\n\nconst StrLight operator+(const StrLight &s1, const char &c)\n{\n    StrLight res;\n    res.reserve(s1.size() + 1);\n    res.append(s1.constData(), s1.size());\n    res += c;\n    return res;\n}\n"
  },
  {
    "path": "src/common/util/strlight.h",
    "content": "#pragma once\n\n#include <cstring>\n#include <qhash.h>\n#include <QDebug>\n\n#include \"exccommon.h\"\n\n/// Yet another String class which aims to perform very fast\n/// in critical sections. For example resizing does not initialize memory\n/// (other than std::string). Further a \"raw\"-mode is supported\n/// (setRawData, setRawSize), where the caller is responsible\n/// for the buffer. Note that the raw mode is only allowed,\n/// if StrLight was default-constructed. If Strlight manages\n/// the memory (bufIsManagedByThis), setRaw* is prohibited.\n/// By using a raw buffer the user clearly shows the intention\n/// to care for performance, so in this mode the copy-constructor\n/// throws! Use the explicit deepCopy in those cases.\nclass StrLight {\npublic:\n    typedef size_t size_type;\n    typedef ssize_t ssize_type;\n    static const size_type\tnpos = static_cast<size_type>(-1);\n\n    StrLight() = default;\n    void setRawData(const char* buf, size_type n);\n    void setRawSize(size_type n);\n\n    StrLight(size_type count, char ch);\n    StrLight(const char* cstring);\n    StrLight(const char* cstring, size_type size);\n\n    ~StrLight();\n\n    StrLight(const StrLight& other);\n    StrLight deepCopy() const;\n    StrLight(StrLight&& other);\n\n    StrLight& operator=(StrLight other);\n    StrLight& operator=(char c);\n\n    StrLight& operator+=(const StrLight& rhs);\n    StrLight& operator+=(const char rhs);\n\n    void append(const char* str, size_type n);\n\n    const char& operator[](size_type idx) const;\n    char back() const;\n    bool bufIsManagedByThis() const;\n\n    size_type find(const char* s, size_type pos = 0) const;\n    size_type find(const StrLight& s, size_type pos = 0) const;\n\n    int lastIndexOf(char ch) const;\n\n    StrLight left(int len) const;\n    StrLight mid(int pos) const;\n    bool empty() const;\n\n    size_type capacity() const;\n    size_type size() const;\n\n    void pop_back();\n    void resize(size_type 
n);\n    void reserve(size_type n);\n\n    const char *constData() const;\n    char *data();\n    const char *c_str() const;\n    const char* constDataEnd() const;\n\nprivate:\n    void realloc(size_type newCapacity);\n    void allocatePlusX(size_type approxNewCapacity);\n    void setSizeInternal(size_type n);\n\n    bool m_weOwnBuf {false};\n    char* m_buf {nullptr};\n    size_type m_size {0};\n    size_type m_capacity {0};\n\npublic:\n    friend void swap(StrLight& first, StrLight& second);\n};\n\n\nuint qHash(const StrLight &key, uint seed = 0);\n\nnamespace std {\ntemplate<> struct hash<StrLight> {\n    std::size_t operator()(const StrLight& s) const {\n        return qHash(s);\n    }\n};\n}\n\nconst StrLight operator+(const StrLight &s1, const StrLight &s2);\nconst StrLight operator+(const StrLight &s1, const char &c);\n\nbool operator==(const StrLight &lhs, const StrLight &rhs);\nbool operator==(const StrLight &lhs, const char &c);\nbool operator!=(const StrLight &lhs, const char &c);\n\nQDebug operator<<(QDebug debug, const StrLight &c);\n\n\n"
  },
  {
    "path": "src/common/util/strlight_util.cpp",
    "content": "#include <cassert>\n\n#include \"strlight_util.h\"\n\n\n/// Get a str-reference to the file-extension in the canonical\n/// filename src (no trailing slashes, etc). dest must be\n/// a raw Buffer!\nvoid strlight_util::findFileExtension_raw(const StrLight &src, StrLight &dest)\n{\n    if(src.size() < 3 || src.back() == '.'){\n        // smallest possible filname with suffx is x.y -> 3 chars\n        // Last char==dot means no extension.\n        dest.setRawSize(0);\n        return;\n    }\n    assert(src.back() != '/');\n\n    const char* srcEnd = src.constDataEnd();\n    // size - 2 or pEnd - 1, because a final dot is already excluded above\n    for(const char* str = srcEnd - 1; str >= src.constData(); str-- ){\n        if(*str == '/'){\n            // nothing found\n            break;\n        }\n        if(*str == '.'){\n            const char* extStart = str + 1;\n            dest.setRawData(extStart, srcEnd - extStart + 1);\n            return;\n        }\n    }\n    // No file extension found\n    dest.setRawSize(0);\n    return;\n}\n"
  },
  {
    "path": "src/common/util/strlight_util.h",
    "content": "\n#pragma once\n\n#include \"strlight.h\"\n\nnamespace strlight_util {\n\nvoid findFileExtension_raw(const StrLight& src, StrLight& dest);\n\n}\n\n"
  },
  {
    "path": "src/common/util/sys_ioprio.h",
    "content": "/* As of glib 2.28 no wrapper exists for this syscall.\n * The definitions were copied from linux-4.19 include/linux/ioprio.h.\n * The same is also done in\n * ionice from util-linux 4.19 (github.com/karelzak/util-linux)\n *\n */\n\n#pragma once\n\n\n#define IOPRIO_CLASS_SHIFT\t(13)\n#define IOPRIO_PRIO_MASK\t((1UL << IOPRIO_CLASS_SHIFT) - 1)\n\n#define IOPRIO_PRIO_CLASS(mask)\t((mask) >> IOPRIO_CLASS_SHIFT)\n#define IOPRIO_PRIO_DATA(mask)\t((mask) & IOPRIO_PRIO_MASK)\n#define IOPRIO_PRIO_VALUE(class, data)\t(((class) << IOPRIO_CLASS_SHIFT) | data)\nenum {\n    IOPRIO_CLASS_NONE,\n    IOPRIO_CLASS_RT,\n    IOPRIO_CLASS_BE,\n    IOPRIO_CLASS_IDLE,\n};\nenum {\n    IOPRIO_WHO_PROCESS = 1,\n    IOPRIO_WHO_PGRP,\n    IOPRIO_WHO_USER,\n};\n"
  },
  {
    "path": "src/common/util/translation.cpp",
    "content": "#include <clocale>\n#include <cstring>\n\n#include \"translation.h\"\n#include \"util.h\"\n\nstatic locale_t g_locale = nullptr;\n\n\nbool translation::init()\n{\n    if(g_locale == nullptr){\n        g_locale = newlocale(LC_CTYPE_MASK|LC_NUMERIC_MASK|LC_TIME_MASK|\n                    LC_COLLATE_MASK|LC_MONETARY_MASK|LC_MESSAGES_MASK,\n                    \"\",locale_t(nullptr));\n    }\n    return g_locale != locale_t(nullptr);\n}\n\n\n\nchar *translation::strerror_l(int errorNumber)\n{\n    if (g_locale == locale_t(nullptr)) {\n        return ::strerror(errorNumber);\n    }\n    return ::strerror_l(errorNumber, g_locale);\n}\n\n\n\ntranslation::TrSnippets& translation::TrSnippets::instance()\n{\n    static TrSnippets s_instance;\n    return s_instance;\n}\n"
  },
  {
    "path": "src/common/util/translation.h",
    "content": "#pragma once\n\n#include <QString>\n#include <cerrno>\n\n#include \"util.h\"\n\nnamespace translation {\n    bool init();\n\n    char* strerror_l(int errorNumber=errno);\n\n\n    class TrSnippets {\n    public:\n\n\n        const QString enable {qtr(\"enable\")};\n        const QString shournalShellIntegration {qtr(\"shournal shell-integration\")};\n        const QString shournalRestore {qtr(\"shournal-restore\")};\n\n        static TrSnippets &instance();\n\n    public:\n        Q_DISABLE_COPY(TrSnippets)\n        ~TrSnippets() = default;\n    private:\n        TrSnippets() = default;\n    };\n\n} // namespace translation\n\n\n"
  },
  {
    "path": "src/common/util/util.cpp",
    "content": "\n#include <uuid/uuid.h>\n#include <QFileInfo>\n#include <QDir>\n#include <QDebug>\n#include <execinfo.h>\n\n#include \"util.h\"\n#include \"nullable_value.h\"\n\n#include \"os.h\"\n\n/// stream.readLineInto added in qt 5.5, be backwards compatible...\nbool readLineInto(QTextStream& stream, QString *line, qint64 maxlen){\n    #if QT_VERSION < QT_VERSION_CHECK(5, 5, 0)\n    QString str =  stream.readLine(maxlen);\n    if(line != nullptr){\n        *line = str;\n    }\n    return ! str.isNull();\n#else\n    return stream.readLineInto(line, maxlen);\n#endif\n}\n\n\n\n#if QT_VERSION < QT_VERSION_CHECK(5, 6, 0)\nQTextStream& operator<<(QTextStream& stream, const QStringRef &string){\n    for(int i=0; i < string.size(); i++){\n        (stream) << string.at(i);\n    }\n    return stream;\n}\n#endif\n\n\n/// Initialize essential components which are used all over shournal, including\n/// unit-tests.\nbool shournal_common_init()\n{\n    return\n    QMetaType::registerConverter<QString, std::string>( [](const QString& str){\n        return str.toStdString();\n    }) &&\n    QMetaType::registerConverter<std::string, QString>( [](const std::string& str){\n        return QString::fromStdString(str);\n    }) &&\n\n    QMetaType::registerConverter<QString, StrLight>( [](const QString& str){\n        QByteArray b = str.toUtf8();\n        return StrLight(b.constData(), b.size());\n    }) &&\n    QMetaType::registerConverter<StrLight, QString>( [](const StrLight& str){\n        return QString::fromUtf8(str.constData(), int(str.size()));\n    }) &&\n\n    QMetaType::registerConverter<HashValue, QString>( [](const HashValue& val){\n        return ((val.isNull()) ? QString() : QString::number(val.value()));\n    }) &&\n    QMetaType::registerConverter<QString, HashValue>( [](const QString& val){\n        return ((val.isEmpty()) ? 
HashValue() : HashValue(qVariantTo_throw<HashValue::value_type>(val)));\n    }) ;\n}\n\n\nQDebug &operator<<(QDebug &out, const std::string &str)\n{\n    out << str.c_str();\n    return out;\n}\n\n\nStrLight toStrLight(const QString &str){\n    return StrLight(str.toUtf8()) ;\n}\n\n\nvoid bytesCombine(std::string &){}\n\n/// Find out, if fullstring ends with ending\nbool hasEnding(const std::string &fullString, const std::string &ending) {\n    if (fullString.length() >= ending.length()) {\n        return (0 == fullString.compare (fullString.length() - ending.length(), ending.length(), ending));\n    }\n    return false;\n\n}\n\n\n/// Same as QFileInfo::absoluteFilePath but additionaly\n/// strips a trailing slash, if any, except the path is only\n/// root /\nQString absPath(const QString &path)\n{\n    if(path == \"/\"){\n        return path;\n    }\n    QFileInfo inf(path);\n    QString abs = inf.absoluteFilePath();\n    if(abs.endsWith(\"/\")){\n        abs = abs.left(abs.length() - 1);\n    }\n    return abs;\n}\n\n/// Equivalent of python's uuid1:  'import uuid; print(uuid.uuid1())'\n/// @param madeSafe: pass a bool to know afterwards, whether\n/// the uuid was created in a safe way.\nQByteArray make_uuid(bool *madeSafe){\n    QByteArray uuid;\n    uuid.resize(sizeof (uuid_t));\n\n    int ret = uuid_generate_time_safe(\n                static_cast<uchar*>(static_cast<void*>(uuid.data()))) ;\n    if(madeSafe != nullptr){\n        *madeSafe = ret == 0;\n    }\n    return uuid;\n}\n\nconst char* strDataAccess(const char* str){\n    return str;\n}\n\nchar* strDataAccess(char* str){\n    return str;\n}\n\nchar *strDataAccess(std::string &str){\n    return &str[0];\n}\n\nconst char *strDataAccess(const std::string &str){\n    return str.c_str();\n}\n\n\nchar *strDataAccess(QByteArray &str){\n    return str.data();\n}\nconst char* strDataAccess(const QByteArray& str){\n    return str.constData();\n}\n\nchar *strDataAccess(StrLight &str)\n{\n    return 
str.data();\n}\n\nconst char *strDataAccess(const StrLight &str)\n{\n    return str.constData();\n}\n\n\n\n\n\n\n/// Constructs a string containing \"null\", if cstr is null,\n/// else the respective value\nstd::string strFromCString(const char *cstr)\n{\n    if(cstr == nullptr) return \"null\";\n    return  cstr;\n}\n\nQPair<std::string, std::string> splitAbsPath(const std::string &fullPath)\n{\n    char sep = '/';\n#ifdef _WIN32\n    sep = '\\\\';\n#endif\n\n    size_t i = fullPath.rfind(sep, fullPath.length());\n    QPair<std::string, std::string> pair;\n    if(i == std::string::npos){\n        pair.first = fullPath;\n        return pair;\n    }\n    if(i == 0){\n        pair.first = \"/\";\n        pair.second = fullPath.substr(1, fullPath.length() - 1);\n        return pair;\n    }\n    pair.first = fullPath.substr(0, i);\n    pair.second = fullPath.substr(i+1, fullPath.length() - i);\n    return pair;\n}\n\n\n/// Hidden files start with a dot, the the first dot is ignored\n/// ( the part before the last dot must not be empty or no file extension\n/// returned)\n/// @param fname: must not contain any os-separator (e.g. 
/)\nstd::string getFileExtension(const std::string &fname)\n{\n    const auto dotIdx = fname.find_last_of('.');\n    if(dotIdx != std::string::npos && dotIdx != 0){\n        return fname.substr(dotIdx +1);\n    }\n    return \"\";\n}\n\n// maybe_todo: add #IF GCC here to allow for other compiler...\n/// @param startIdx: generally you would want to choose a value\n/// greater than zero, otherwise this function will be added as well.\nstd::string generate_trace_string(int startIdx)\n{\n    const int MAX_STACKTRACE_SIZE = 10;\n\n    void *array[MAX_STACKTRACE_SIZE];\n    char **strings;\n\n    auto size = backtrace (array, MAX_STACKTRACE_SIZE);\n    strings = backtrace_symbols (array, size);\n\n    std::string bt;\n    for (int i = startIdx; i < size; i++){\n        bt += std::string(\" at \") + strings[i] + \"\\n\";\n    }\n    free (strings);\n    return bt;\n}\n\nQString argvToQStr(int argc, char * const argv[]){\n    QStringList l;\n    for(int i=0; i < argc; i++){\n        l.push_back(argv[i]);\n    }\n    return l.join(\" \");\n}\n\nstd::string argvToStr(int argc, char *const argv[])\n{\n    std::string argStr;\n    for(int i=0; i < argc; i++){\n        argStr += std::string(argv[i]) + ' ';\n    }\n    if(! argStr.empty()){\n        // strip final whitespace\n        argStr.resize(argStr.size() - 1);\n    }\n    return argStr;\n}\n\n/// Argv to space-separated string\n/// As usual, argv must be terminated by a final nullptr.\nstd::string argvToStr(char *const argv[])\n{\n    std::string argStr;\n    while(true){\n        if(*argv == nullptr){\n            break;\n        }\n        argStr += std::string(*argv) + ' ';\n        ++argv;\n    }\n    if(! argStr.empty()){\n        // strip final whitespace\n        argStr.resize(argStr.size() - 1);\n    }\n    return argStr;\n}\n\n/// see also: QChar::isSpace\nint indexOfNonWhiteSpace(const QString &str)\n{\n    for(int i=0; i < str.size(); i++){\n        if(! 
str[i].isSpace()){\n            return i;\n        }\n    }\n    return -1;\n}\n\n\n\nbool qVariantTo(const std::string &str, QString *result) {\n    *result = QString::fromStdString(str);\n    return true;\n}\n\nbool qVariantTo(const StrLight& str, QString* result){\n    *result = QString::fromUtf8(str.constData(), int(str.size()));\n    return true;\n}\n\n\n\n\n"
  },
  {
    "path": "src/common/util/util.h",
    "content": "\n#pragma once\n\n#ifndef qtr\n#define qtr QObject::tr\n#endif\n\n#define GET_VARIABLE_NAME(Variable) (#Variable)\n\n#include <assert.h>\n\n#include <QDateTime>\n#include <QtGlobal>\n#include <QObject>\n#include <QVariant>\n#include <QString>\n#include <QDir>\n#include <QTextStream>\n\n#include <functional>\n#include <memory>\n\n#ifndef likely\n#ifdef __GNUC__\n#define likely(x)       __builtin_expect(!!(x), 1)\n#define unlikely(x)     __builtin_expect(!!(x), 0)\n#else\n#define likely(x)       (x)\n#define unlikely(x)     (x)\n#endif\n#endif\n\n\n#include \"exccommon.h\"\n#include \"UninitializedMemoryHacks.h\"\n#include \"strlight.h\"\n\n\nFOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT(signed char)\nFOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT(unsigned char)\nFOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT(char16_t)\nFOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT(char32_t)\nFOLLY_DECLARE_STRING_RESIZE_WITHOUT_INIT(unsigned short)\n\n\n#define DISABLE_MOVE(Class) \\\n    Class(const Class &&) Q_DECL_EQ_DELETE;\\\n    Class &operator=(Class &&) Q_DECL_EQ_DELETE;\n\n#define DEFAULT_MOVE(Class) \\\n    Class(Class &&) noexcept Q_DECL_EQ_DEFAULT;\\\n    Class &operator=(Class &&) noexcept Q_DECL_EQ_DEFAULT ;\n\nbool readLineInto(QTextStream& stream, QString *line, qint64 maxlen = 0);\n\n\n#if QT_VERSION < QT_VERSION_CHECK(5, 6, 0)\n#include <QTextStream>\nQTextStream& operator<<(QTextStream& stream, const QStringRef &string);\n#endif\n\n\n/// @return true, if the given weak pointer is default constructed\ntemplate <typename T>\nbool is_uninitialized(std::weak_ptr<T> const& weak) {\n    using wt = std::weak_ptr<T>;\n    return !weak.owner_before(wt{}) && !wt{}.owner_before(weak);\n}\n\n\nQ_DECLARE_METATYPE(std::string)\nQ_DECLARE_METATYPE(StrLight)\n\nbool shournal_common_init();\n\n#if QT_VERSION < QT_VERSION_CHECK (5, 14, 0)\nnamespace std {\n\n/// Make QString hashable in stl-containers\ntemplate<> struct hash<QString> {\n    std::size_t operator()(const QString& s) const 
{\n        return qHash(s);\n    }\n};\n}\n#endif\n\nStrLight toStrLight(const QString& str);\n\n/// allow std::string to be printed via qDebug()\nQDebug& operator<<(QDebug& out, const std::string& str);\n\n\n\n/// Common functions to get raw data access for std::string and QByteArray\nconst char* strDataAccess(const char* str);\nchar* strDataAccess(char* str);\nchar* strDataAccess(std::string& str);\nconst char* strDataAccess(const std::string& str);\nchar* strDataAccess(QByteArray& str);\nconst char* strDataAccess(const QByteArray& str);\nchar* strDataAccess(StrLight& str);\nconst char* strDataAccess(const StrLight& str);\n\nint indexOfNonWhiteSpace(const QString& str);\n\n/// return argv as space-separated string\nQString argvToQStr(int argc, char *const argv[]);\n\nstd::string argvToStr(int argc, char * const argv[]);\nstd::string argvToStr(char * const argv[]);\n\ntemplate<class T>\nstd::string bytesFromVar(const T& t)\n{\n    static_assert (std::is_pod<T>(), \"\");\n    const char* raw_ = static_cast<const char*>(static_cast<const void*>(&t));\n    std::string str(raw_, sizeof (T));\n    return str;\n}\n\n/// @return defaultVal if size does not match\ntemplate<class T>\nT varFromBytes(const std::string &str, const T& defaultVal)\n{\n    static_assert (std::is_pod<T>(), \"\");\n    if(str.size() != sizeof (T)){\n        return defaultVal;\n    }\n    const T* pT = static_cast<const T*>(\n                static_cast<const void*>(&str[0])\n            );\n    T copy_ = *pT;\n    return copy_;\n}\n\n\ntemplate<class T>\nQByteArray qBytesFromVar(const T& t)\n{\n    static_assert (std::is_pod<T>(), \"\");\n    const char* raw_ = static_cast<const char*>(static_cast<const void*>(&t));\n    QByteArray str(raw_, sizeof (T));\n    return str;\n}\n\n/// @return defaultVal if size does not match\ntemplate<class T>\nT varFromQBytes(const QByteArray &str, const T& defaultVal)\n{\n    static_assert (std::is_pod<T>(), \"\");\n    if(str.size() != sizeof (T)){\n        return 
defaultVal;\n    }\n    const T* pT = static_cast<const T*>(\n                static_cast<const void*>(str.constData())\n            );\n    T copy_ = *pT;\n    return copy_;\n}\n\ntemplate<class T>\nT varFromQBytes(const QByteArray &str)\n{\n    static_assert (std::is_pod<T>(), \"\");\n    if(str.size() != sizeof (T)){\n        return T();\n    }\n    const T* pT = static_cast<const T*>(\n                static_cast<const void*>(str.constData())\n            );\n    T copy_ = *pT;\n    return copy_;\n}\n\n\ntemplate<class T> T\nBIT(const T & x)\n{ return T(1) << x; }\n\ntemplate<class T>\nbool IsBitSet(const T & x, const T & y)\n{ return (x & y) != 0; }\n\n/// unset mask in flags;\ntemplate<class T>\nvoid clearBitIn(T& flags, const T& mask){\n    flags &= ~mask;\n}\n\n/// set mask in flags;\ntemplate<class T>\nvoid setBitIn(T& flags, const T& mask){\n    flags |= mask;\n}\n\n\nbool hasEnding(const std::string &fullString, const std::string &ending);\n\n/// recursion end of bytesCombine ...\nvoid bytesCombine(std::string &);\n\n/// Obtain all bytes of all arguments and append them to str\ntemplate<typename First, typename ... Values>\nvoid bytesCombine(std::string & result, First arg, const Values&... rest ){\n    for (size_t idx = 0; idx < sizeof(First); idx++){\n        char byte = *((char *)&arg + idx);\n        result.push_back(byte);\n    }\n    bytesCombine(result, rest...);\n}\n\n\ntemplate <class Container>\nbool contains(const Container& container, const typename Container::value_type& element)\n{\n    return std::find(container.begin(), container.end(), element) != container.end();\n}\n\n/// Drop certain fields of the time, e.g. milliseconds. Also\n/// sets lower fields, e.g. 
dropping minutes sets seconds\n/// and ms to 0 as well\n/// @param c one of M(inutes), s(econds), m(illiseconds)\nstatic inline void dropFromTime(QTime& t, char c){\n    int m=t.minute(), s=t.second(), ms=t.msec();\n    switch (c) {\n    case 'M': m = 0; break;\n    case 's': s = 0; break;\n    case 'm': ms = 0; break;\n    default: throw QExcIllegalArgument(QString(\"Bad format c %1\").arg(c));\n    }\n    t.setHMS(t.hour(), m, s, ms);\n}\n\n/// @overload\nstatic inline void dropFromTime(QDateTime& d, char c){\n    auto t = d.time();\n    dropFromTime(t, c);\n    d.setTime(t);\n}\n\nQString absPath(const QString& path);\n\n/// Convert the passed value to QVariant, convert\n/// to target-type and return true\n/// on success\ntemplate<typename T>\nbool qVariantTo(QVariant var, T* result) {\n    if(! var.convert(qMetaTypeId<T>())){\n        return false;\n    }\n    *result = var.value<T>();\n    return true;\n}\n\n// Don't forget to register converter functions when adding more types...\nbool qVariantTo(const std::string& str, QString* result);\nbool qVariantTo(const StrLight& str, QString* result);\n\nclass ExcQVariantConvert : public QExcCommon\n{\npublic:\n     using QExcCommon::QExcCommon;\n};\n\n/// Convert to target-type, throw on error\n/// @throws ExcQVariantConvert\ntemplate<typename T>\nvoid qVariantTo_throw(const QVariant& src, T* dst, bool collectStacktrace=true)\n{\n    if(! 
qVariantTo<T>(src, dst)){\n        const char* targetTypeName = QVariant::fromValue(*dst).typeName();\n        QString actualTypeName;\n        if(targetTypeName == nullptr){\n            actualTypeName = \"invalid/unknown\";\n        } else {\n            actualTypeName = targetTypeName;\n        }\n        QString mesg = qtr(\n                    \"Failed to convert '%1' to type '%2'\"\n                    ).arg(src.toString()).arg(actualTypeName);\n        throw ExcQVariantConvert(mesg, collectStacktrace);\n    }\n}\n\n/// @overload\ntemplate<typename T>\nT qVariantTo_throw(const QVariant& src, bool collectStacktrace=true){\n    T dst;\n    qVariantTo_throw(src, &dst, collectStacktrace);\n    return dst;\n}\n\n/// @overload\ntemplate<typename T>\nT qVariantTo_throw(const std::string& src, bool collectStacktrace=true ){\n    T dst;\n    if(! qVariantTo(src, &dst)){\n        // this should never happen, because of having\n        // specialized for std::string...\n        QString mesg = qtr(\n                    \"Failed to convert std::string to target type.\");\n        throw ExcQVariantConvert(mesg, collectStacktrace);\n    }\n    return dst;\n}\n\n\nQByteArray make_uuid(bool *madeSafe=nullptr);\n\n\n/// Split an absolute path into directory-path and filename:\n/// /home/user/foo -> \"/home/user\", \"foo\"\n/// If no separator is contained, the full path is returned\n/// Works with QByteArray, QString and std::string (see overload)\ntemplate<typename T>\nQPair<T, T> splitAbsPath(const T& path){\n    QPair<T, T> pair;\n    const int lastSlash = path.lastIndexOf('/');\n    if(lastSlash == -1 || lastSlash == int(path.size()) - 1){\n        pair.first = path;\n        return pair;\n    }\n    if(lastSlash == 0){\n        pair.first = \"/\";\n        pair.second = (path.mid(1));\n        return pair;\n    }\n    pair.first = path.left(lastSlash);\n    pair.second = path.mid(lastSlash + 1);\n    return pair;\n}\n\n\ntemplate<typename T>\nT pathJoinFilename(const T& 
path, const T& filename){\n    assert(path.size() != 0);\n    assert(filename.size() != 0);\n\n    // special case root\n    if(path == \"/\"){\n        return path + filename;\n    }\n    return path + \"/\" + filename;\n}\n\n/// @overload\nQPair<std::string, std::string> splitAbsPath(const std::string& fullPath);\n\n\nstd::string getFileExtension(const std::string& fname);\n\nstd::string strFromCString(const char* cstr);\n\n\nstd::string generate_trace_string(int startIdx=2);\n\n"
  },
  {
    "path": "src/common/util/util_performance.cpp",
    "content": "#include \"util_performance.h\"\n\n\n/// reverse:  reverse string s in place\nvoid util_performance::reverse(char *s, int size)\n{\n    int i, j;\n    char c;\n\n    for (i = 0, j = size-1; i<j; i++, j--) {\n        c = s[i];\n        s[i] = s[j];\n        s[j] = c;\n    }\n}\n\n/// itoa:  convert n to characters in s\n/// flaw: it does not correctly handle the most negative number\nvoid util_performance::itoa(int n, char *s)\n{\n    int i, sign;\n\n    if ((sign = n) < 0)\n        n = -n;\n    i = 0;\n    do {\n        s[i++] = n % 10 + '0';\n    } while ((n /= 10) > 0);\n    if (sign < 0)\n        s[i++] = '-';\n    s[i] = '\\0';\n    reverse(s, i);\n}\n\n/// like itoa but avoid sign checks\nvoid util_performance::uitoa(unsigned n, char *s)\n{\n    int i;\n    i = 0;\n    do {\n        s[i++] = n % 10 + '0';\n    } while ((n /= 10) > 0);\n    s[i] = '\\0';\n    reverse(s, i);\n}\n"
  },
  {
    "path": "src/common/util/util_performance.h",
    "content": "#pragma once\n\n/// Warning. Some of the functions used\n/// here are dangerous and should not\n/// be used in general\nnamespace util_performance  {\n\nvoid reverse(char* s, int size);\n\nvoid itoa(int n, char* s);\nvoid uitoa(unsigned n, char* s);\n\n\n} // namespace util_performance\n"
  },
  {
    "path": "src/common/xxhash_common.c",
    "content": "\n#include \"xxhash_common.h\"\n\n#ifdef __KERNEL__\n\n#include <linux/errno.h>\n#include <linux/fadvise.h>\n#include <linux/fs.h>\n#include <linux/types.h>\n\n\n#include \"kutil.h\"\n#include \"xxhash_shournalk.h\"\n\n#else\n#include <assert.h>\n#include <stdbool.h>\n#include <unistd.h>\n#include <errno.h>\n#include <sys/param.h>\n\n#define xxh64_update  XXH64_update\n#define xxh64_reset   XXH64_reset\n#define xxh64_digest  XXH64_digest\n#define xxh64         XXH64\n#endif\n\n#ifndef min\n#define min MIN\n#endif\n\n#include \"user_kernerl.h\"\n\nstatic ssize_t __do_read(xxh_common_file_t file, void *buf, size_t nbytes){\n#ifdef __KERNEL__\n    return kutil_kernel_read_cachefriendly(file, buf, nbytes, &file->f_pos);\n#else\n    ssize_t ret = read(file, buf, nbytes);\n    if(unlikely(ret < 0)){\n        return -errno;\n    }\n    return ret;\n#endif\n}\n\n\nstatic loff_t __do_seek(xxh_common_file_t file, loff_t offset){\n#ifdef __KERNEL__\n    file->f_pos += offset;\n    return 0;\n    // return vfs_llseek(file, offset, whence);\n#else\n    loff_t ret = lseek(file, offset, SEEK_CUR);\n    if(unlikely(ret < 0)){\n        return -errno;\n    }\n    return ret;   \n#endif\n}\n\n/// read bufsize bytes from file and directly hash them\nstatic ssize_t __read_and_hash(xxh_common_file_t file,\n                                       void* buf, size_t bufsize,\n                                       XXH_COMMON_STATE* xxh_state){\n    ssize_t readBytes = __do_read(file, buf, bufsize);\n    if(likely(readBytes > 0)){\n        long xxh_ret = xxh64_update(xxh_state, buf, readBytes);\n        // we always provide a valid buffer, so no\n        // need to check for errors in production.\n        kuassert(xxh_ret == 0);\n        (void)xxh_ret; // avoid unused warning for release builds\n    }\n    return readBytes;\n}\n\n\n/// read a chunk of bytes from the file and hash it.\n/// Do that multiple times, if the buffer-size is smaller\n/// than the chunksize.\n/// 
@return The number of read/hashed bytes or a neg. error.\nstatic ssize_t __read_chunk(xxh_common_file_t file,\n                                   struct partial_xxhash* part_hash){\n    ssize_t readBytes;\n\n    if(part_hash->chunksize <= part_hash->bufsize){\n        readBytes = __read_and_hash(file, part_hash->buf, part_hash->chunksize,\n                                    part_hash->xxh_state);\n    } else {        \n        // read and digest immediately, as our buffer is small\n        size_t missing_bytes = part_hash->chunksize;\n        size_t readsize = part_hash->bufsize;\n        while(true){\n            ssize_t bytes = __read_and_hash(file, part_hash->buf, readsize,\n                                            part_hash->xxh_state);\n            if(unlikely(bytes < 0)) return bytes;\n\n            missing_bytes -= bytes;\n            kuassert(missing_bytes >= 0);\n            if((size_t)bytes < readsize ||\n                missing_bytes == 0){\n                break;\n            }\n            if(missing_bytes < part_hash->bufsize){\n                // almost done. Loop one last time with\n                // a smaller read size\n                readsize = missing_bytes;\n            }\n        }\n        readBytes = part_hash->chunksize - missing_bytes;\n\n    }\n    return readBytes;\n}\n\n\n\n/// XXHASH-digest a whole file or parts of it at regular intervals.\n/// @param file the file descriptor of the file. Note that in general you would want\n///           to make sure, that the offset is at 0. Note that the offset\n///           may be changed during the call.\n/// @param chunksize size of the chunks to read at once.\n/// @param seekstep Read chunks from the file every seekstep bytes. The read chunk\n///                 does not count into this, so if you actually want to skip bytes,\n///                 seekstep must be greater than chunksize. 
Otherwise NO SEEK is\n///                 performed at all.\n/// @param maxCountOfReads stop reading and digest after that count of 'read'-\n///                        operations.\n/// @param result write hash and count of read bytes in here\n/// @return 0 on success, else a positive error\nlong partial_xxh_digest_file(xxh_common_file_t file,\n                struct partial_xxhash* part_hash,\n                struct partial_xxhash_result* result\n                )\n{\n    long err;\n    int countOfReads;\n    loff_t net_seek;\n    result->count_of_bytes = 0;\n\n    kuassert(part_hash->max_count_of_reads > 0);\n    kuassert(part_hash->chunksize > 0);\n    kuassert(part_hash->bufsize > 0);\n\n    xxh64_reset(part_hash->xxh_state, 0);\n    net_seek = part_hash->seekstep - part_hash->chunksize;\n\n    for(countOfReads=0; countOfReads < part_hash->max_count_of_reads ; ++countOfReads) {\n        // maybe_todo: preload next chunk with filemap.c:page_cache_read?\n        ssize_t readBytes = __read_chunk(file, part_hash);\n        if(unlikely(readBytes < 0)) return -readBytes;\n        result->count_of_bytes += readBytes;\n        if(readBytes < part_hash->chunksize) {\n            break; // EOF\n        }\n\n        if( net_seek > 0  &&\n                unlikely((err = __do_seek(file, net_seek)) < 0) ) {\n            return -err;\n        }\n    }\n    if(result->count_of_bytes == 0){\n        result->hash = 0;\n    } else {\n        result->hash = xxh64_digest(part_hash->xxh_state);\n    }\n    return 0;\n}\n"
  },
  {
    "path": "src/common/xxhash_common.h",
    "content": "/* Common code for usage both in user and (linux-)kernelspace\n *\n */\n\n#pragma once\n\n\n#ifdef __KERNEL__\n#include \"shournalk_global.h\"\n\n#include <linux/types.h>\nstruct file;\n\n#define XXH_COMMON_STATE struct xxh64_state\ntypedef struct file* xxh_common_file_t;\n\n#else\n\n#include <sys/types.h>\n#include <stdint.h>\n#include \"xxhash.h\"\n\n#define XXH_COMMON_STATE XXH64_state_t\ntypedef int xxh_common_file_t;\n\n#endif // __KERNEL__\n\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nstruct partial_xxhash {\n    unsigned chunksize; /* read and hash that many bytes per chunk */\n    int max_count_of_reads; /* do not read more than that many chunks */\n    loff_t seekstep; /* determined by file size and max_count_of_reads */\n    XXH_COMMON_STATE* xxh_state;\n    char* buf;\n    size_t bufsize;\n};\n\nstruct partial_xxhash_result {\n    uint64_t hash;\n    unsigned long long count_of_bytes;   // number of read bytes\n};\n\n\nlong partial_xxh_digest_file(xxh_common_file_t file,\n                struct partial_xxhash* part_hash,\n                struct partial_xxhash_result* result);\n\n\n#ifdef __cplusplus\n}\n#endif\n"
  },
  {
    "path": "src/shell-integration-fanotify/CMakeLists.txt",
    "content": "\ninclude(GenerateExportHeader)\n\n\ninclude_directories(\n    ../common\n    ../common/oscpp\n    ../common/qsimplecfg\n    ../common/qsqlthrow\n    ../common/database\n    )\n\n\nadd_library(libshournal-shellwatch SHARED    \n    libshournal-shellwatch.cpp\n    attached_bash.cpp\n    attached_shell.cpp\n    event_open.cpp\n    event_process.cpp\n    shell_globals.cpp\n    shell_logger.cpp\n    shell_request_handler.cpp\n    )\n\n# to list exported symbols of the compiled .so:\n#  nm -D libshournal-shellwatch.so | grep ' T '\nGENERATE_EXPORT_HEADER(libshournal-shellwatch)\nhide_static_lib_symbols(libshournal-shellwatch)\n\n# manually set the name of the .so -> we need it later\n# Do not change the name libshournal-shellwatch.so, it is hardcoded into the shell-integration-scripts.\nset_target_properties(libshournal-shellwatch PROPERTIES OUTPUT_NAME \"${libshournal_fullname}\")\nset_target_properties(libshournal-shellwatch PROPERTIES PREFIX \"\")\nset_target_properties(libshournal-shellwatch PROPERTIES SUFFIX \"\")\ntarget_link_libraries(libshournal-shellwatch\n    ${CMAKE_DL_LIBS} # dlsym\n    # Using the static lib_shournal in our shared library exposes\n    # all symbols (not a good idea in a LD_PRELOAD-lib).\n    # Hiding them requires either a --version-script\n    # or all libraries being compiled with -fvisibility=hidden. The former\n    # approach seems more elegant.\n    # See also https://stackoverflow.com/a/22110050/7015849\n    \"-Wl,--version-script=${CMAKE_CURRENT_SOURCE_DIR}/libshellwatch.version\"\n    lib_shournal_common\n    uuid\n)\n\n\n########################## Installation ##########################\n\ninstall(TARGETS libshournal-shellwatch\n     DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}/${PROJECT_NAME}\n)\n\n\n\n\n\n"
  },
  {
    "path": "src/shell-integration-fanotify/attached_bash.cpp",
    "content": "\n#include <QDebug>\n#include <dlfcn.h>\n\n#include \"logger.h\"\n\n\n#include \"attached_bash.h\"\n#include \"os.h\"\n\nstatic int read_seq(){\n    const char* _LIBSHOURNAL_SEQ_COUNTER = \"_LIBSHOURNAL_SEQ_COUNTER\";\n    const char* seq_val = getenv(_LIBSHOURNAL_SEQ_COUNTER);\n    if(seq_val == nullptr){\n        logWarning << qtr(\"Required environment variable '%1' \"\n                          \"is unset.\").arg(_LIBSHOURNAL_SEQ_COUNTER);\n        return -1;\n    }\n    int seq;\n    try {\n        qVariantTo_throw(seq_val, &seq);\n    } catch (const ExcQVariantConvert& ex) {\n        logWarning << \"Failed to convert sequnce:\"\n                   << ex.descrip();\n        return -1;\n    }\n    return seq;\n}\n\n/// @throws ExcOs\nAttachedBash::AttachedBash() :\n    m_lastSeq(1)\n{}\n\nvoid AttachedBash::handleEnable()\n{\n    m_lastSeq = read_seq();\n}\n\n/// The command is considered valid, if the command-counter\n/// has changed since the last call of this function or handleEnable().\n/// This function is meant to be called only *once*\n/// per command sequence.\nbool AttachedBash::cmdCounterJustIncremented()\n{\n    int current_seq = read_seq();\n    if(current_seq == -1){\n        return false; // error\n    }\n    if(current_seq == m_lastSeq){\n        return false;\n    }\n    m_lastSeq = current_seq;\n    return true;\n}\n"
  },
  {
    "path": "src/shell-integration-fanotify/attached_bash.h",
    "content": "#pragma once\n\n#include \"attached_shell.h\"\n\n/// We read the env-variable set in bash's PS0\n/// in order to prepare the observation of the next command\n/// sequence. We cannot do this easily from within a function\n/// called in PS0, because that is run in a subshell.\n/// Another possibility is to run a signal handler but there we must\n/// again be careful not to interfere with custom handlers of the\n/// user.\n/// counter=0\n/// trap_handler(){\n///     counter=$((counter+1))\n///     echo \"hi from trap_handler: $counter: $(history 1)\" >&2\n/// }\n/// trap trap_handler SIGRTMIN\n/// PS0='$(echo \"sending signal... \"; kill -SIGRTMIN  $$; )'\nclass AttachedBash : public AttachedShell\n{\npublic:\n    AttachedBash();\n\n    void handleEnable() override;\n\n    bool cmdCounterJustIncremented() override;\n\nprivate:\n    int m_lastSeq;\n};\n\n"
  },
  {
    "path": "src/shell-integration-fanotify/attached_shell.cpp",
    "content": "\n#include \"attached_shell.h\"\n\n\nvoid AttachedShell::handleEnable()\n{}\n\n/// This function is meant to be called only *once*\n/// per command sequence.\nbool AttachedShell::cmdCounterJustIncremented()\n{\n    return false;\n}\n"
  },
  {
    "path": "src/shell-integration-fanotify/attached_shell.h",
    "content": "#pragma once\n\n#include \"util.h\"\n\n/// Abstract base class for shells\nclass AttachedShell\n{\npublic:\n    AttachedShell() = default;\n    virtual ~AttachedShell() = default;\n\n    virtual void handleEnable();\n    virtual bool cmdCounterJustIncremented();\n\npublic:\n    Q_DISABLE_COPY(AttachedShell)\n    DEFAULT_MOVE(AttachedShell)\n};\n"
  },
  {
    "path": "src/shell-integration-fanotify/event_open.cpp",
    "content": " #define _LARGEFILE64_SOURCE\n\n#include <climits>\n#include <cstdlib>\n#include <cassert>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n\n#include \"event_open.h\"\n#include \"logger.h\"\n\n#include \"shell_globals.h\"\n#include \"cleanupresource.h\"\n#include \"qoutstream.h\"\n#include \"osutil.h\"\n#include \"shell_request_handler.h\"\n#include \"shell_logger.h\"\n#include \"translation.h\"\n\nusing shell_request_handler::ShellRequest;\nusing shell_request_handler::checkForTriggerAndHandle;\n\n/// @return absolute version of the passed path or an empty string in case\n/// of an error.\nstatic std::string mkAbsPath(const char* path){\n    if(path[0] == '/'){\n        return path;\n    }\n    std::string buf(PATH_MAX, '\\0');\n\n    char* rawBuf = strDataAccess(buf);\n    if(getcwd(rawBuf, buf.size()) == nullptr){\n        logWarning << qtr(\"Failed to resolve relative path %1. \"\n                       \"The working-directory could not be determined (%2). \"\n                       \"File events will not be registered.\")\n                   .arg(path, translation::strerror_l());\n        return {};\n    }\n    if(rawBuf[0] != '/'){\n        // see also man 3 getcwd\n        logWarning << qtr(\"Failed to resolve relative path %1. \"\n                       \"The working-directory does not begin with '/' but %2. \"\n                       \"File events will not be registered.\")\n                   .arg(path, rawBuf);\n        return {};\n    }\n\n    // resize to actual length\n    buf.resize(strlen(rawBuf));\n    if(buf.size() != 1){\n        buf += '/';\n    }\n    buf += path;\n    return buf;\n}\n\n/// Write to a new unnamed tmp-file, if the shell-request\n/// was successful. Note that the shell will close the fd\n/// for us later.\n/// @return fd to deleted tmp-file.\nstatic int writerTriggerResponse(bool success){\n    int fd =  osutil::unnamed_tmp();\n    std::string mesg = (success) ? 
\"ok\" : \"fail\";\n    // write string null-terminated (size+1) so in the shell\n    // we can read -d '' trigger_response < '_///shournal_trigger_response///_'\n    os::write(fd, mesg.c_str(), mesg.size() + 1);\n    os::lseek(fd, 0, SEEK_SET);\n    return fd;\n}\n\nint event_open::handleOpen(const char *pathname, int flags, mode_t mode, bool largeFile)\n{\n    if(largeFile){\n        setBitIn(flags, O_LARGEFILE);\n    }\n\n    auto& g_shell = ShellGlobals::instance();\n    if(g_shell.ignoreEvents.test_and_set()){\n        return g_shell.orig_open(pathname, flags, mode);\n    }\n    auto clearIgnEvents = finally([&g_shell] { g_shell.ignoreEvents.clear(); });\n\n    // Note: we only process shell-request if the trigger env-variable is set AND the current\n    // pathname is _///shournal_trigger_response///_\n    // So check for the pathname before handling the request in\n    // checkForTriggerAndHandle (this is for cases where the trigger variable is set\n    // and other redirections occur in between).\n    if(strcmp(pathname, \"_///shournal_trigger_response///_\") == 0){\n        bool shellRequestSuccess = false;\n        auto shellRequest = checkForTriggerAndHandle(&shellRequestSuccess);\n        switch (shellRequest) {\n            case ShellRequest::TRIGGER_UNSET: break;\n            default:\n                return writerTriggerResponse(shellRequestSuccess);\n            }\n    }\n\n    if(g_shell.watchState != E_WatchState::WITHIN_CMD){\n        shell_earlydbg(\"ignoring pathname %s (not WITHIN_CMD)\", pathname);\n        return g_shell.orig_open(pathname, flags, mode);\n    }\n    const auto absPath = mkAbsPath(pathname);\n\n    // pass the resolved abs. 
path relative to shournal's root directory fd,\n    // by omitting the initial '/'.\n    // Users may further pass malformed file-paths such as //foo, so find the first\n    // non-slash char.\n    const char* actualPath = nullptr;\n    for(size_t i=0; i < absPath.size(); i++){\n        if(absPath[i] != '/'){\n            actualPath = &absPath[i];\n            break;\n        }\n    }\n\n    if(actualPath == nullptr || absPath.c_str() + absPath.size() - actualPath < 1){\n        // Get here on mkAbsPath-error or because user attempted to open \"/\" or \"\"\n        // The shortest possible absolute FILEpath under linux is two chars long.\n        // We may get here, if bash-user calls e.g.\n        // while read line; do echo $line ; done < \"/\"\n        logDebug << \"no valid path\" << absPath;\n        return g_shell.orig_open(pathname, flags, mode);\n    }\n\n    logDebug << \"about to open\" << actualPath - 1;\n    return openat(g_shell.shournalRootDirFd, actualPath, flags, mode);\n}\n\n"
  },
  {
    "path": "src/shell-integration-fanotify/event_open.h",
    "content": "\n#pragma once\n\n#include <sys/stat.h>\n\nnamespace event_open {\n\nint handleOpen(const char *pathname, int flags, mode_t mode, bool largeFile );\n\n}\n\n"
  },
  {
    "path": "src/shell-integration-fanotify/event_process.cpp",
    "content": "\n#include <cassert>\n#include <QStandardPaths>\n#include <QCoreApplication>\n#include <sys/socket.h>\n#include <csignal>\n\n#include \"attached_bash.h\"\n#include \"cleanupresource.h\"\n#include \"logger.h\"\n\n#include \"event_process.h\"\n\n#include \"shell_globals.h\"\n#include \"settings.h\"\n#include \"excos.h\"\n#include \"qsimplecfg/exccfg.h\"\n#include \"subprocess.h\"\n#include \"osutil.h\"\n#include \"fdcommunication.h\"\n#include \"util.h\"\n#include \"commandinfo.h\"\n#include \"app.h\"\n#include \"shell_logger.h\"\n#include \"translation.h\"\n#include \"qoutstream.h\"\n#include \"shell_request_handler.h\"\n\n\n\nstatic int execveUnobserved(const char *filename, char * const argv[], char * const envp[]){\n    auto& g_shell = ShellGlobals::instance();\n    logDebug << __func__ << filename;\n\n    auto sockFlags = g_shell.shournalSockFdDescripFlags;\n    setBitIn(sockFlags, FD_CLOEXEC);\n    os::setFdDescriptorFlags(g_shell.shournalSocketNb, sockFlags);\n    // in case execve fails, restore flags.\n    auto resetCLOEXEC = finally([&g_shell] {\n        try {\n            os::setFdDescriptorFlags(g_shell.shournalSocketNb,\n                                     g_shell.shournalSockFdDescripFlags);\n        } catch (std::exception& e) {\n            logCritical << e.what();\n        }\n\n    });\n    return g_shell.orig_execve(filename, argv, envp);\n}\n\n\npid_t event_process::handleFork()\n{\n    auto& g_shell = ShellGlobals::instance();\n    if( g_shell.ignoreEvents.test_and_set()){\n        return g_shell.orig_fork();\n    }\n    auto clearIgnEvents = finally([&g_shell] {g_shell.ignoreEvents.clear(); });\n\n    if( g_shell.inParentShell &&\n         g_shell.watchState == E_WatchState::INTERMEDIATE &&\n          dynamic_cast<const AttachedBash*>(g_shell.pAttchedShell) != nullptr &&\n         g_shell.pAttchedShell->cmdCounterJustIncremented()){\n        shell_request_handler::handlePrepareCmd();\n    }\n\n    pid_t ret = 
g_shell.orig_fork();\n    if(ret == 0){\n        if(g_shell.shellParentPid != 0){\n            // our parent shell is initialized, so we can't be it\n            g_shell.inParentShell = false;\n        }\n    }\n    return ret;\n}\n\n\n\nint event_process::handleExecve(const char *filename, char * const argv[], char * const envp[])\n{\n    auto& g_shell = ShellGlobals::instance();\n    if(g_shell.ignoreEvents.test_and_set()){\n        return g_shell.orig_execve(filename, argv, envp);\n    }\n    auto clearIgnEvents = finally([&g_shell] {g_shell.ignoreEvents.clear(); });\n\n    if( g_shell.inParentShell ||\n         g_shell.watchState != E_WatchState::WITHIN_CMD){\n        shell_earlydbg(\"ignore execve of %s\", filename);\n        return g_shell.orig_execve(filename, argv, envp);\n    }\n\n    // No point in observing an unstattable executable.\n    // Further, do not monitor suid-applications. Note: this is, of course, *not*\n    // a security-feature, however, events by other users are in\n    // relevant cases not recorded by shournal anyway.\n    struct stat st;\n    if(stat(filename, &st) == -1 || IsBitSet(st.st_mode, mode_t(S_ISUID) ) ){\n        return execveUnobserved(filename, argv, envp);\n    }\n\n    std::string filenameStr(filename);\n    auto& sets = Settings::instance();\n    if(sets.ignoreCmdsRegardslessOfArgs().find(filenameStr) !=\n            sets.ignoreCmdsRegardslessOfArgs().end()){\n        return execveUnobserved(filename, argv, envp);\n    }\n    std::string fullCmd;\n\n    QVarLengthArray<const char*, 8192> args;\n\n    args.push_back(app::SHOURNAL_RUN_FANOTIFY);\n    args.push_back(\"--msenter\");\n    std::string pid = std::to_string(g_shell.lastMountNamespacePid);\n    args.push_back(pid.c_str());\n\n    args.push_back(\"--verbosity\");\n    args.push_back(g_shell.shournalRunVerbosity.c_str());\n\n    args.push_back(\"--env\");\n    // first value after --env is its size, which we don't know yet.\n    args.push_back(\"DUMMY\");\n    int 
envSizeIdx = args.size() -1;\n\n    // set shournal socket only for observed processes (do not add to\n    // shell env).\n    const std::string shournalSocketNbStr = std::string(app::ENV_VAR_SOCKET_NB) + '=' +\n                                      std::to_string(g_shell.shournalSocketNb);\n    args.push_back(shournalSocketNbStr.c_str());\n\n    for(char* const *e = envp; *e != nullptr; e++) {\n        args.push_back(*e);\n    }\n    // optimization in shournal-run...\n    args.push_back(\"SHOURNAL_DUMMY_NULL=1\");\n    std::string envSize = std::to_string(args.size() - envSizeIdx - 1);\n    args[envSizeIdx] = envSize.c_str();\n\n    args.push_back(\"--exec-filename\");\n    args.push_back(filename);\n\n    args.push_back(\"--exec\");\n    fullCmd += filenameStr + ' ';\n\n    for(int i=0; ; i++) {\n        // include final nullptr here\n        args.push_back(argv[i]);\n        if(argv[i] == nullptr){\n            break;\n        }\n        if(i > 0){\n            // for the ignore-list skip argv0 which should be the same\n            // as filename in most cases anyway.\n            fullCmd.append(argv[i]);\n            fullCmd += ' ';\n        }\n    }\n    // strip final whitespace\n    fullCmd.pop_back();\n    if(sets.ignoreCmds().find(fullCmd) !=\n            sets.ignoreCmds().end()){\n        logDebug << \"exec UNobserved:\" << fullCmd.c_str();\n        return execveUnobserved(filename, argv, envp);\n    }\n\n    logDebug << \"execvpe observed:\" << fullCmd.c_str();\n    try {\n        os::exec(args, envp);\n    } catch (const os::ExcOs& e) {\n        logCritical << qtr(\"Failed to launch %1 with external program. \"\n                           \"Please make sure %2 is in your PATH: %3. \"\n                           \"Running it unobserved instead...\")\n                       .arg(filename, app::SHOURNAL_RUN_FANOTIFY, e.what());\n    }\n    return execveUnobserved(filename, argv, envp);\n}\n\n\n\n"
  },
  {
    "path": "src/shell-integration-fanotify/event_process.h",
    "content": "#pragma once\n\n#include <sched.h>\n\nnamespace event_process {\n\npid_t handleFork();\n\nint handleExecve(const char *filename, char *const argv[],\n               char *const envp[]);\n}\n\n"
  },
  {
    "path": "src/shell-integration-fanotify/libshellwatch.version",
    "content": "{\n  global: open; open64; fork; execve; strcpy;\n  local: *;         # hide everything else\n};\n"
  },
  {
    "path": "src/shell-integration-fanotify/libshournal-shellwatch.cpp",
    "content": "\n// necessary for RTLD_NEXT in dlfcn.h\n#ifndef _GNU_SOURCE\n#define _GNU_SOURCE\n#endif\n#include <dlfcn.h>\n#include <exception>\n#include <cstdio>\n#include <iostream>\n\n#include \"cleanupresource.h\"\n#include \"event_open.h\"\n#include \"event_process.h\"\n#include \"staticinitializer.h\"\n#include \"shell_globals.h\"\n#include \"shell_logger.h\"\n\n// cmake export-symbol control:\n#include \"libshournal-shellwatch_export.h\"\n\n\n/// Initialize the original functions close, fclose ...\n/// One might think it was a good idea, to initialize the functions\n/// in gcc's __attribute__((constructor)). This is too late,\n/// at that time fclose/close was already called several times.\n/// One has to be _extremely_ careful not to call anything which invokes\n/// one of the below preloaded functions (open, fork...) in here,\n/// otherwise we're lost.\nstatic void initSymIfNeeded(){\n    static StaticInitializer loader( [](){\n        try {\n            ShellGlobals& g_shell = ShellGlobals::instance();\n            g_shell.orig_fork = reinterpret_cast<fork_func_t>(os::dlsym(RTLD_NEXT, \"fork\"));\n            g_shell.orig_execve = reinterpret_cast<execve_func_t>(os::dlsym(RTLD_NEXT, \"execve\"));\n            g_shell.orig_open = reinterpret_cast<open_func_t>(os::dlsym(RTLD_NEXT, \"open\"));\n            // globals.orig_fopen = reinterpret_cast<fopen_func_t>(os::dlsym(RTLD_NEXT, \"fopen\"));\n            g_shell.orig_strcpy = reinterpret_cast<strcpy_func_t>(os::dlsym(RTLD_NEXT, \"strcpy\"));\n\n            return;\n        } catch(const std::exception& ex){\n            fprintf(stderr,\n                    \"shournal shell integration fatal error: \"\n                    \"failed to load original symbols, expect \"\n                    \"the worst: %s\", ex.what());\n        }\n    });\n\n#ifndef NDEBUG\n    // Ignoring events is maybe not strictly necessary here,\n    // but better safe than sorry.\n    ShellGlobals& g_shell = 
ShellGlobals::instance();\n    if(g_shell.ignoreEvents.test_and_set()){\n        return;\n    }\n    auto clearIgnEvents = finally([&g_shell] { g_shell.ignoreEvents.clear(); });\n\n    static StaticInitializer initPrintDbg( [](){\n        shell_earlydbg(\"initalizing libshournal-shellwatch.so for pid %d %s\",\n                       os::getpid(),\n                       os::readlink<std::string>(\"/proc/self/exe\").c_str());\n    });\n#endif\n\n}\n\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n\nLIBSHOURNAL_SHELLWATCH_EXPORT\nint open(const char *pathname, int flags, mode_t mode) {\n    // std::cerr << __func__ << \"\\n\";\n    initSymIfNeeded();\n    try{\n        return event_open::handleOpen(pathname, flags, mode, false);\n    } catch (const std::exception& ex ) {\n        std::cerr << __func__ << \" fatal: \" << ex.what() << \"\\n\";\n    }\n    return ShellGlobals::instance().orig_open(pathname, flags, mode);\n}\n\n\nLIBSHOURNAL_SHELLWATCH_EXPORT\nint open64(const char *pathname, int flags, mode_t mode) {\n    // std::cerr << __func__ << \"\\n\";\n    initSymIfNeeded();\n    try{\n        // probably O_LARGEFILE should only be set, if we are running in 32\n        // bit mode (using open64). It seems to do no harm though (see handleOpen).\n        return event_open::handleOpen(pathname, flags, mode, true);\n    } catch (const std::exception& ex ) {\n        std::cerr << __func__ << \" fatal: \" << ex.what() << \"\\n\";\n    }\n    return ShellGlobals::instance().orig_open(pathname, flags, mode);\n}\n\n// There seems to be no point in observing fopen - browsing the source-code\n// of bash, zsh, kash, csh, .. all relevant user file activity is handled via\n// the 'open' library-call. If one day it would be observed anyway: the shell's\n// seem to not make use of any (g)libc-fopen-mode-extensions like 'c' or ,ccs=string.\n// As such the translation of w,r,a etc. to O_WRONLY,O_RDONLY, etc. is pretty\n// straight forward. 
Otherwise things get more complicated - fdopen does not handle\n// all the cases.\n// LIBSHOURNAL_SHELLWATCH_EXPORT\n// FILE* fopen(const char *path, const char *mode) {\n//     // std::cerr << __func__ << \"\\n\";\n//     try{\n//         initSymIfNeeded();\n//         return event_open::handleFopen(path, mode);\n//     } catch (const std::exception& ex ) {\n//         std::cerr << __func__ << \" fatal: \" << ex.what() << \"\\n\";\n//     }\n//     return nullptr;\n// }\n\n\n// see comment for fopen.\n// LIBSHOURNAL_SHELLWATCH_EXPORT\n// FILE* fopen64(const char *path, const char *mode) {\n//     // initIfNeeded();\n//     // FILE* f = orig_fopen64(path, mode);\n//     // if(f != NULL){\n//     //     handleOpen(fileno(f));\n//     // }\n//     return f;\n// }\n\n\n\n\nLIBSHOURNAL_SHELLWATCH_EXPORT\npid_t fork(){\n    initSymIfNeeded();\n    try {\n        return event_process::handleFork();\n    } catch (const std::exception& ex ) {\n        std::cerr << __func__ << \" fatal: \" << ex.what() << \"\\n\";\n    }\n    return ShellGlobals::instance().orig_fork();\n\n}\n\nLIBSHOURNAL_SHELLWATCH_EXPORT\nint execve(const char *filename, char *const argv[],\n           char *const envp[]){\n    initSymIfNeeded();\n    try {\n        return event_process::handleExecve(filename, argv, envp);\n    } catch (const std::exception& ex ) {\n        std::cerr << __func__ << \" fatal: \" << ex.what() << \"\\n\";\n    }\n    return ShellGlobals::instance().orig_execve(filename, argv, envp);\n}\n\n\n#ifdef __cplusplus\n}\n#endif\n"
  },
  {
    "path": "src/shell-integration-fanotify/shell_globals.cpp",
    "content": "\n#include \"app.h\"\n#include \"qoutstream.h\"\n#include \"shell_globals.h\"\n#include \"shell_logger.h\"\n#include \"staticinitializer.h\"\n#include \"translation.h\"\n\nconst char* ENV_VARNAME_SHELL_VERBOSITY = \"_SHOURNAL_LIB_SHELL_VERBOSITY\";\n\nstatic bool updateShouranlRunVerbosityFromEnv(bool verboseIfUnset){\n    const char* VERB_VARNAME = \"_SHOURNAL_VERBOSITY\";\n    const char* verbosityValue = getenv(VERB_VARNAME);\n    if(verbosityValue == nullptr){\n        if(verboseIfUnset){\n            logWarning << qtr(\"Required verbosity environment variable '%1' \"\n                              \"is unset.\").arg(VERB_VARNAME);\n            return false;\n        }\n        return true;\n    }\n    if(app::VERBOSITIES.find(verbosityValue) == app::VERBOSITIES.end()){\n        logWarning << qtr(\"Verbosity environment variable '%1' \"\n                          \"is invalid ('%2')\").arg(VERB_VARNAME, verbosityValue);\n        return false;\n    }\n    auto& g_shell = ShellGlobals::instance();\n    g_shell.shournalRunVerbosity = verbosityValue;\n    return true;\n}\n\n\nShellGlobals &ShellGlobals::instance()\n{\n    static ShellGlobals s;\n    return s;\n}\n\nShellGlobals::ShellGlobals()\n{\n     ignoreEvents.clear();\n     ignoreSigation.clear();\n}\n\nbool ShellGlobals::performBasicInitIfNeeded(){\n    bool success = true;\n    static StaticInitializer initOnFirstCall( [&success](){\n        app::setupNameAndVersion(\"shournal shell-integration\");\n        try {\n            if(! 
shournal_common_init()){\n                QIErr()  << qtr(\"Fatal error: failed to initialize custom Qt conversion functions\");\n            }\n            shell_logger::setup();\n            translation::init();\n            updateVerbosityFromEnv(false);\n        } catch (const std::exception& ex) {\n            success = false;\n            logCritical << ex.what();\n        }\n\n     });\n    return success;\n}\n\n\n/// @param verboseIfUnset If true print a warning if the environment variables\n/// are unset.\n/// @return if verboseIfUnset: return false if unset or invalid\n///         else             : return false if invalid\nbool ShellGlobals::updateVerbosityFromEnv(bool verboseIfUnset){\n    bool shournalRunSuccess = updateShouranlRunVerbosityFromEnv(verboseIfUnset);\n\n    const char* VERB_VARNAME = ENV_VARNAME_SHELL_VERBOSITY;\n    const char* verbosityValue = getenv(VERB_VARNAME);\n    if(verbosityValue == nullptr){\n        if(verboseIfUnset){\n            logWarning << qtr(\"Required verbosity environment variable '%1' \"\n                              \"is unset.\").arg(VERB_VARNAME);\n            return false;\n        }\n        return shournalRunSuccess;\n    }\n    if(app::VERBOSITIES.find(verbosityValue) == app::VERBOSITIES.end()){\n        logWarning << qtr(\"Verbosity environment variable '%1' \"\n                          \"is invalid ('%2')\").arg(VERB_VARNAME, verbosityValue);\n        return false;\n    }\n    auto& g_shell = ShellGlobals::instance();\n    g_shell.verbosityLevel = logger::strToMsgType(verbosityValue);\n\n    return shournalRunSuccess;\n}\n\n"
  },
  {
    "path": "src/shell-integration-fanotify/shell_globals.h",
    "content": "#pragma once\n\n#include <csignal>\n#include <sched.h>\n#include <atomic>\n#include <QByteArray>\n#include <QDateTime>\n#include <mutex>\n\n#include \"attached_shell.h\"\n#include \"fdcommunication.h\"\n#include \"logger.h\"\n#include \"os.h\"\n#include \"sessioninfo.h\"\n#include \"util.h\"\n\n\ntypedef pid_t (*fork_func_t)();\n\ntypedef int (*execve_func_t)(const char *filename, char *const argv[],\n                  char *const envp[]);\n\ntypedef int (*open_func_t)(const char *pathname, int flags, mode_t mode);\ntypedef char * (*strcpy_func_t)(char *, const char*);\n\nextern const char* ENV_VARNAME_SHELL_VERBOSITY;\n\nenum class E_WatchState {DISABLED, WITHIN_CMD, INTERMEDIATE, ENUM_END};\n\n\nclass ShellGlobals\n{\n\npublic:\n    static ShellGlobals& instance();\n    static bool performBasicInitIfNeeded();\n    static bool updateVerbosityFromEnv(bool verboseIfUnset);\n\n    int shournalSocketNb {-1};\n    fork_func_t orig_fork {};\n    execve_func_t orig_execve {};\n    open_func_t orig_open {};\n    strcpy_func_t orig_strcpy {};\n\n    std::atomic_flag ignoreEvents{};\n\n    E_WatchState watchState {E_WatchState::DISABLED};\n    bool inParentShell {false};\n    fdcommunication::SocketCommunication shournalSocket;\n    pid_t lastMountNamespacePid {-1};\n\n    struct sigaction origSigintAction{};\n    std::atomic_flag ignoreSigation{};\n\n    AttachedShell* pAttchedShell {};\n    QtMsgType verbosityLevel {QtMsgType::QtWarningMsg};\n    std::string shournalRunVerbosity {logger::msgTypeToStr(QtWarningMsg)};\n    int shournalSockFdDescripFlags {-1};\n\n    SessionInfo sessionInfo;\n    int shournalRootDirFd {-1};\n\n    pid_t shellParentPid {0};\n\npublic:\n    ~ShellGlobals() = default;\n    Q_DISABLE_COPY(ShellGlobals)\n    DISABLE_MOVE(ShellGlobals)\n\nprivate:\n    ShellGlobals();\n\n};\n\n\n"
  },
  {
    "path": "src/shell-integration-fanotify/shell_logger.cpp",
    "content": "#include <stdarg.h>\n\n#include <QtGlobal>\n#include <QDateTime>\n#include <QFileInfo>\n\n#include \"app.h\"\n#include \"shell_logger.h\"\n#include \"qoutstream.h\"\n#include \"shell_globals.h\"\n#include \"fdcommunication.h\"\n#include \"logger.h\"\n#include \"socket_message.h\"\n\nusing socket_message::E_SocketMsg;\n\nnamespace {\n\nstruct ShellLogState {\n    QString logPreamble;\n    QVarLengthArray<QByteArray, 8192> bufferedMessages;\n};\n\nShellLogState& sLogState(){\n    static ShellLogState s;\n    return s;\n}\n\n\nvoid sendViaSock(QByteArray& msg){\n    try {\n        ShellGlobals::instance().shournalSocket.sendMsg({int(E_SocketMsg::LOG_MESSAGE),\n                                              msg} );\n    } catch (const os::ExcOs& e) {\n        QIErr() << \"Failed to send message via socket:\" << e.what();\n    }\n}\n\n\nvoid messageHandler(QtMsgType msgType, const QMessageLogContext &context, const QString &msg)\n{\n    auto& g_shell = ShellGlobals::instance();\n    int desiredVerbosity = logger::msgTypeToOrdinal(g_shell.verbosityLevel);\n    int typeOrdinal = logger::msgTypeToOrdinal(msgType);\n\n#ifndef NDEBUG\n    if (msgType == QtDebugMsg) {\n        if(typeOrdinal >= desiredVerbosity){\n            QErr() << sLogState().logPreamble << \" Dbg: \"\n                   << \"(\" << QFileInfo(context.file).fileName() <<\":\" << context.line << \") \"\n                   << \"pid \" << getpid() << \": \"\n                   << msg << '\\n' ;\n        }\n        return;\n    }\n#else\n    Q_UNUSED(context)\n#endif\n\n    QString msgTypeStr = logger::msgTypeToStr(msgType);\n\n    const QString dateTime = QDateTime::currentDateTime().toString(\n                \"yyyy-MM-dd HH:mm:ss\");\n\n    if(typeOrdinal >= desiredVerbosity){\n        QErr() << sLogState().logPreamble << \" \"      <<dateTime<<' '<< msgTypeStr<<\": \"<<msg<< \"\\n\";\n    }\n     QByteArray msgArr =\n             (QString(dateTime + ' ' + msgTypeStr + \" pid %1\" + \": \" + 
msg)\n              .arg(getpid())).toLocal8Bit();\n     if(g_shell.watchState == E_WatchState::WITHIN_CMD){\n         sendViaSock(msgArr);\n     } else {\n         if(sLogState().bufferedMessages.size() + 1 > sLogState().bufferedMessages.capacity()){\n             QErr() << sLogState().logPreamble << \" \" << qtr(\"Too many log-messages could not be sent \"\n                                                   \"to external %1-process, \"\n                                                   \"so some will be lost (not logged to disk). \"\n                                                   \"This is most likely a bug.\").arg(app::SHOURNAL);\n             sLogState().bufferedMessages.clear();\n         }\n         sLogState().bufferedMessages.push_back(msgArr);\n     }\n\n}\n\n} // namespace\n\nvoid shell_logger::setup()\n{\n    sLogState().logPreamble = QString(app::SHOURNAL) + \" shell-integration\";\n    qInstallMessageHandler(messageHandler);\n}\n\n\n\n/// There is not always a socket to shournal open.\n/// In that case, store them here until flushed\nvoid shell_logger::flushBufferdMessages()\n{\n    for(QByteArray& msg : sLogState().bufferedMessages){\n        sendViaSock(msg);\n    }\n    sLogState().bufferedMessages.clear();\n}\n\n/// Instead of logDebug use this function which works without any\n/// complex initialization. Otherwise we might mess up global variables of\n/// the attached program, e.g. qInstallMessageHandler or\n/// QCoreApplication::setApplicationName ...\n/// Note however that we remove *this shared libaray from LD_PRELOAD _before_\n/// calling qInstallMessageHandler etc. 
(from within the shell integration scripts),\n/// so we should be mostly safe.\n/// In general it is probably a good idea to not use foreign complex functions\n/// like qInstallMessageHandler from within the shell integration at all ..\nvoid __shell_earlydbg(const char* file, int line, const char *format, ...)\n{\n    const char* verbosityValue = getenv(ENV_VARNAME_SHELL_VERBOSITY);\n    if(verbosityValue == nullptr || strcmp(verbosityValue, \"dbg\") != 0){\n        return;\n    }\n    fprintf(stderr, \"shournal shell integration Dbg: (%s:%d) pid %d: \",\n            file, line, os::getpid());\n\n    va_list args;\n    va_start(args, format);\n    vfprintf(stderr, format, args);\n    va_end(args);\n    fprintf(stderr, \"\\n\");\n}\n"
  },
  {
    "path": "src/shell-integration-fanotify/shell_logger.h",
    "content": "#pragma once\n\n#ifndef __FILENAME__\n#define __FILENAME__ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)\n#endif\n\n/// The shell logger prints to stderr,\n/// and sends messages via socket to the external\n/// shournal process, where they are written to file.\n/// In case the socket is closed, the messages are buffered.\nnamespace shell_logger {\n\nvoid setup();\n\nvoid flushBufferdMessages();\n\n}\n\n#ifndef NDEBUG\nvoid __shell_earlydbg(const char* file, int line, const char *format, ...);\n#define shell_earlydbg(format, args...) __shell_earlydbg(__FILENAME__, __LINE__, format, ## args)\n#else\n#define shell_earlydbg(format, args...)\n#endif\n\n"
  },
  {
    "path": "src/shell-integration-fanotify/shell_request_handler.cpp",
    "content": "\n#include <sys/socket.h>\n#include <cassert>\n#include <cstdlib>\n#include <QCoreApplication>\n#include <dirent.h>\n\n#include \"shell_request_handler.h\"\n\n#include \"cleanupresource.h\"\n#include \"logger.h\"\n\n#include \"event_process.h\"\n\n#include \"shell_globals.h\"\n#include \"settings.h\"\n#include \"excos.h\"\n#include \"qsimplecfg/exccfg.h\"\n#include \"subprocess.h\"\n#include \"osutil.h\"\n#include \"fdcommunication.h\"\n#include \"util.h\"\n#include \"commandinfo.h\"\n#include \"app.h\"\n#include \"shell_logger.h\"\n#include \"translation.h\"\n#include \"qoutstream.h\"\n#include \"attached_bash.h\"\n#include \"staticinitializer.h\"\n#include \"socket_message.h\"\n#include \"interrupt_handler.h\"\n#include \"conversions.h\"\n\nusing socket_message::E_SocketMsg;\nusing socket_message::socketMsgToStr;\nusing fdcommunication::SocketCommunication;\nusing osutil::closeVerbose;\nusing shell_request_handler::ShellRequest;\n\n// const char* shellRequestToStr(ShellRequest r){\n//     switch (r) {\n//     case ShellRequest::ENABLE:\n//         return \"enable\";\n//     case ShellRequest::DISABLE:\n//         return \"disable\";\n//     case ShellRequest::PREPARE_CMD:\n//         return \"prepare_cmd\";\n//     case ShellRequest::CLEANUP_CMD:\n//         return \"cleanup_cmd\";\n//     case ShellRequest::PRINT_VERSION:\n//         return \"print_version\";\n//     case ShellRequest::SIGINT_HANDLER_INSTALL:\n//         return \"sigint_install\";\n//     case ShellRequest::SIGINT_HANDLER_RESTORE:\n//         return \"sigint_restore\";\n//     case ShellRequest::DUMMY:\n//         return \"dummy\";\n//     case ShellRequest::ENUM_END:\n//         return \"enumend!\";\n//     }\n//     return \"unkown\";\n// }\n\n\nstatic bool initializeAttachedShellIfNeeded(){\n    auto& g_shell = ShellGlobals::instance();\n    if(g_shell.pAttchedShell != nullptr){\n        return true;\n    }\n    const char* attachedShellName = 
getenv(\"_SHOURNAL_SHELL_NAME\");\n    if(attachedShellName == nullptr){\n        // should never happen\n        logCritical << \"shell name is not set in environment.\";\n        return false;\n    }\n    try {\n        switch (attachedShellName[0]) {\n        case 'b': // bash\n            g_shell.pAttchedShell = new AttachedBash();\n            break;\n        case 'z': // zsh - nothing special to do.\n            g_shell.pAttchedShell = new AttachedShell();\n            break;\n        default:\n            logCritical << \"unknown shell name:\" << attachedShellName;\n            return false;\n        }\n    } catch (const os::ExcOs& e) {\n        logCritical << \"Failed to initialize attached shell:\" << e.what();\n        return false;\n    }\n\n    return true;\n}\n\n\nstatic bool loadSettings(){\n    try {\n        // maybe_todo: copy file to another path and load the same file later in shournal-run:\n        Settings::instance().load();\n        return true;\n    } catch (const std::exception& e) {\n        logCritical << e.what() << \"\\n\";\n    }\n    logCritical << \"Because of that, the shell observation is disabled\\n\";\n    return false;\n}\n\n\n/// Shells usually start at low numbers for internal file descriptors (usually 10),\n/// we try to find the highest possible free fd\n/// If startFd != -1, start searching from that.\nstatic int verbose_findHighestFreeFd(int startFd=-1){\n    int fd = osutil::findHighestFreeFd(startFd, 30);\n    if(fd == -1){\n        logWarning << qtr(\"Could not find a free file descriptor number. \"\n                          \"The max. 
number of open files for this process is %1.\")\n                      .arg(osutil::getMaxCountOpenFiles());\n    }\n    return fd;\n}\n\n/// Read update request from environment and check if the request\n/// is valid (log error on exit).\nstatic ShellRequest readCheckShellUpdateRequest(){\n    const char* TRIGGER_NAME = \"_LIBSHOURNAL_TRIGGER\";\n    const char* shellStateStr = getenv(TRIGGER_NAME);\n    if(shellStateStr == nullptr){\n        // No update request\n        return ShellRequest::TRIGGER_UNSET;\n    }\n    if(! ShellGlobals::performBasicInitIfNeeded()){\n        // should never happen.\n        return ShellRequest::TRIGGER_UNSET;\n    }\n\n    uint shellRequestInt;\n    try {\n        qVariantTo_throw(shellStateStr, &shellRequestInt);\n    } catch (const ExcQVariantConvert& ex) {\n        logCritical << qtr(\"Cannot determine shell-request: \")\n                    << ex.descrip();\n        return ShellRequest::TRIGGER_MALFORMED;\n    }\n\n    if(shellRequestInt >= static_cast<int>(ShellRequest::ENUM_END)){\n        logCritical << qtr(\"Invalid shell-request passed:\")\n                    << shellRequestInt;\n        return ShellRequest::TRIGGER_MALFORMED;\n    }\n    auto shellRequest = static_cast<ShellRequest>(shellRequestInt);\n\n    // Note: this logDebug is called BEFORE initialize logging.\n    // logDebug << \"received shell request:\" << int(shellRequest);\n    // QIErr() << \"received shell request:\" << int(shellRequest) << \"(current state:\"\n    //          << int(ShellGlobals::instance().watchState) << \")\";\n    return shellRequest;\n}\n\nstatic void verboseCloseShournalSocket(){\n    auto& g_shell = ShellGlobals::instance();\n    if(g_shell.shournalSocket.sockFd() >= 0 &&\n            close(g_shell.shournalSocket.sockFd()) == -1){\n        logWarning << \"close of shournal-socket failed:\"\n                   << translation::strerror_l();\n\n    }\n    g_shell.shournalSocket.setSockFd(-1);\n}\n\nstatic void 
verboseCloseRootDirFd(){\n    auto& g_shell = ShellGlobals::instance();\n    if(g_shell.shournalRootDirFd >= 0 &&\n            close(g_shell.shournalRootDirFd) == -1){\n        logWarning << \"close of shournal-root dir-fd failed:\"\n                   << translation::strerror_l();\n\n    }\n    g_shell.shournalRootDirFd = -1;\n}\n\nstatic bool updateShellPID(){\n    const char* _SHOURNAL_SHELL_PID = \"_SHOURNAL_SHELL_PID\";\n    const char* pidValue = getenv(_SHOURNAL_SHELL_PID);\n    if(pidValue == nullptr){\n        logWarning << qtr(\"Required environment variable '%1' \"\n                          \"is unset.\").arg(_SHOURNAL_SHELL_PID);\n        return false;\n    }\n    pid_t pid;\n    try {\n        qVariantTo_throw(pidValue, &pid);\n    } catch (const ExcQVariantConvert& ex) {\n        logWarning << \"Failed to convert pid:\"\n                   << ex.descrip();\n        return false;\n    }\n    auto realPid = getpid();\n    if(pid != realPid){\n        logWarning << qtr(\"Apparently we were enabled from a subshell, which \"\n                          \"is not supported.\");\n        return false;\n    }\n\n    auto& g_shell = ShellGlobals::instance();\n    g_shell.shellParentPid = pid;\n    g_shell.inParentShell = true;\n\n    return true;\n}\n\n\nstatic bool handleDisableRequest(){\n    auto& g_shell = ShellGlobals::instance();\n    if(g_shell.watchState == E_WatchState::DISABLED){\n        logWarning << qtr(\"Received disable-request while shell observation \"\n                          \"was already disabled.\");\n        return false;\n    }\n    g_shell.watchState = E_WatchState::DISABLED;\n\n    verboseCloseShournalSocket();\n    verboseCloseRootDirFd();\n    logDebug << \"shell-integration disabled!\";\n\n    return true;\n}\n\n\nstatic bool handleCleanupCmd(){\n    auto& g_shell = ShellGlobals::instance();\n\n    if(g_shell.watchState != E_WatchState::WITHIN_CMD){\n        //   can for example happen, if\n        // - a user presses enter while 
the command-string is empty, or Ctrl+C\n        //   is pressed without a currently executing command,\n        // - being at the first prompt after SHOURNAL_ENABLE -> no command\n        //   was observed yet, nothing to clean up\n        logDebug << \"ignoring cleanup-request: not within command.\";\n        return false;\n    }\n\n    auto finalActions = finally([&g_shell] {\n        g_shell.watchState = E_WatchState::INTERMEDIATE;\n        verboseCloseShournalSocket();\n        verboseCloseRootDirFd();\n    });\n\n    QByteArray lastCommand = getenv(\"_SHOURNAL_LAST_COMMAND\");\n    if(lastCommand.isNull()){\n        logWarning << \"Failed to retrieve last command string from environment\";\n        lastCommand = \"UNKNOWN\";\n    }    \n    const char* lastReturnValueStr = getenv(\"_SHOURNAL_LAST_RETURN_VALUE\");\n    qint32 returnVal = CommandInfo::INVALID_RETURN_VAL;\n    if(lastReturnValueStr == nullptr){\n        logWarning << qtr(\"Failed to retrieve last return-value from environment\");\n    } else {\n        try {\n            qVariantTo_throw(lastReturnValueStr, &returnVal);\n        } catch (const ExcQVariantConvert& ex) {\n            logWarning << \"Failed to convert last return value:\"\n                       << ex.descrip();\n        }\n    }\n    logDebug << __func__  << \"sending to shournal-run-fanotify\"\n             << \"($?:\" << returnVal << \"):\"\n             << lastCommand.mid(0, 100);\n    SocketCommunication::Messages messages;\n    messages.push_back({int(E_SocketMsg::COMMAND), lastCommand});\n    messages.push_back({int(E_SocketMsg::RETURN_VALUE), qBytesFromVar(returnVal)});\n    g_shell.shournalSocket.sendMessages(messages);\n\n    return true;\n}\n\n\nstatic bool handleEnableRequest(){\n    if(! 
initializeAttachedShellIfNeeded()){\n        return false;\n    }\n\n    ShellGlobals::updateVerbosityFromEnv(false);\n    auto& g_shell = ShellGlobals::instance();\n    if(g_shell.watchState != E_WatchState::DISABLED){\n        logDebug << \"received enable request while watchstate != DISABLED\"\n                 << int(E_WatchState::DISABLED);\n    }\n\n    static StaticInitializer initOnFirstCall( [](){\n        // This shell might have been launched within an already observed shell.\n        // Note that when the shell observation was launched, we already left\n        // the observerd mount namespace. Thus all that remains to do is closing\n        // respective fd (which hopefully does not belong to another program..\n        const char* socketNbStr = getenv(app::ENV_VAR_SOCKET_NB);\n        if(socketNbStr == nullptr){\n            return ;\n        }\n        auto laterUnsetIt = finally([] { unsetenv(app::ENV_VAR_SOCKET_NB); });\n\n        int fdNb;\n        try {\n            qVariantTo_throw(socketNbStr, &fdNb);\n        } catch (const ExcQVariantConvert& ex) {\n            logCritical << qtr(\"Bad environment variable %1: \").arg(app::ENV_VAR_SOCKET_NB)\n                        << ex.descrip();\n            return;\n        }\n        if(osutil::fdIsOpen(fdNb)){\n            logDebug << \"initially closing shournal-socket\" << fdNb;\n            closeVerbose(fdNb);\n        } else {\n            logInfo << QString(\"The environment variable %1 is set, but the socket \"\n                                \"%2 is not open\").arg(app::ENV_VAR_SOCKET_NB).arg(fdNb);\n        }\n\n    });\n\n    g_shell.watchState = E_WatchState::INTERMEDIATE;\n    bool madeSafe;\n    g_shell.sessionInfo.uuid = make_uuid(&madeSafe);\n    if(! madeSafe){\n        logInfo << __func__ << qtr(\"session uuid not created 'safe'. 
Is the uuidd-daemon running?\");\n    }\n    os::setenv(QByteArray(\"SHOURNAL_SESSION_ID\"), g_shell.sessionInfo.uuid.toBase64());\n\n    g_shell.pAttchedShell->handleEnable();\n    logDebug << \"shell-integration enabled!\";\n    return true;\n}\n\n\n/// Launch external shournal (detached) and wait for it to finish unsharing\n/// the mount-NS and fanotify-marking the mounts. Since it is called in a new session\n/// (setsid), it survives the parent shell (*this process), furhter it receives no sigint, destinated\n/// for our shell, which could have caused it to terminate even before installing a SIGIGN-handler.\n/// Pass a socket to shournal, which is used for communication *and* to stop it\n/// (semi-)automatically. Each subsequentially launched process inherits it. Once\n/// all of them finished *and* we cleaned up (or died), external shournal stops.\n/// Note that for processes, which close passed file-descriptors\n/// before exit, shournal might quit too early, in which case file modfication events\n/// are lost\nbool shell_request_handler::handlePrepareCmd(){\n    ShellGlobals::updateVerbosityFromEnv(false);\n    auto& g_shell = ShellGlobals::instance();\n    if(g_shell.watchState == E_WatchState::WITHIN_CMD){\n        // Happens e.g. if a previous cleanup request was ignored due to\n        // an invalid command.\n        logDebug << \"Received setup-request while shell observation \"\n                                   \"was already enabled (might be ok).\";\n        return false;\n    }\n\n    if(! 
loadSettings()){\n        return false;\n    }\n\n    g_shell.shournalSocket.setSockFd(-1);\n\n    try {\n        g_shell.shournalSocketNb = verbose_findHighestFreeFd();\n        if( g_shell.shournalSocketNb == -1){\n            return false;\n        }\n        g_shell.shournalRootDirFd = verbose_findHighestFreeFd(g_shell.shournalSocketNb - 1);\n        if( g_shell.shournalRootDirFd == -1){\n            return false;\n        }\n\n        auto sockets = os::socketpair(PF_UNIX, SOCK_STREAM);\n\n        auto autocloseSocket0 = finally([&sockets] { close(sockets[0]); });\n        auto autocloseSocket1 = finally([&sockets] { close(sockets[1]); });\n\n        const char* BACKEND_FILENAME = app::SHOURNAL_RUN_FANOTIFY;\n        subprocess::Args_t args = {\n            BACKEND_FILENAME,\n            \"--socket-fd\", std::to_string(sockets[0]),\n            \"--verbosity\", g_shell.shournalRunVerbosity,\n            \"--shell-session-uuid\", g_shell.sessionInfo.uuid.toBase64().data()\n        };\n        const char* tmpdir = getenv(\"TMPDIR\");\n        if(tmpdir != nullptr){\n            args.push_back(\"--tmpdir\");\n            args.push_back(tmpdir);\n        }\n        g_shell.lastMountNamespacePid = -1;\n        subprocess::Subprocess subproc;\n        subproc.setInNewSid(true); // Survive parent shell exit\n        // Pass the socket to the external shournal process for communication purposes.\n        std::unordered_set<int> forwardFs {sockets[0]};\n        if(app::inIntegrationTestMode()){\n            // forward a pipe to async shournal so integration-test knows when it finished\n            const char* pipeFdStr = getenv(\"_SHOURNAL_INTEGRATION_TEST_PIPE_FD\");\n            if(pipeFdStr == nullptr){\n                QIErr() << \"app is set to integration test mode, but pipe-fd is not set...\";\n            } else {\n                int pipeFd = qVariantTo_throw<int>(QByteArray(pipeFdStr));\n                if(! 
osutil::fdIsOpen(pipeFd)){\n                    QIErr() << \"_SHOURNAL_INTEGRATION_TEST_PIPE_FD set in env \"\n                               \"but fd\" << pipeFd <<  \"is not open\";\n                } else {\n                    forwardFs.insert(pipeFd);\n                }\n            }\n        }\n\n        subproc.setForwardFdsOnExec(forwardFs);\n        subproc.call(args);\n        logDebug << \"launched\" << BACKEND_FILENAME\n                 << \"(pid\" << subproc.lastPid() << \")\";\n\n        os::close(sockets[0]);\n        autocloseSocket0.setEnabled(false);\n\n        // avoid deadlock: close our write end\n        // wait for reply from shournal\n        g_shell.shournalSocket.setReceiveBufferSize(100);\n        g_shell.shournalSocket.setSockFd(sockets[1]);\n\n\n        auto messages=g_shell.shournalSocket.receiveMessages();\n        if(messages.size() != 1 ){\n            logCritical << qtr(\"Setup of external %1-process failed: \"\n                               \"expected one message but received %2\")\n                                .arg(BACKEND_FILENAME)\n                                .arg(messages.size());\n            return false;\n        }\n        auto& socketMsg = messages.first();\n\n\n        if( E_SocketMsg(socketMsg.msgId) != E_SocketMsg::SETUP_DONE){\n            QString msg = (socketMsg.msgId < 0 || socketMsg.msgId >= int(E_SocketMsg::ENUM_END))\n                                       ? 
qtr(\"Bad response\")\n                                       : socketMsgToStr(E_SocketMsg(socketMsg.msgId));\n\n            logCritical << qtr(\"Setup of external %1-process failed, \"\n                               \"received message: %2 (%3)\")\n                           .arg(BACKEND_FILENAME)\n                           .arg(msg)\n                           .arg(int(socketMsg.msgId));\n            return false;\n        }\n\n        g_shell.lastMountNamespacePid = varFromQBytes(socketMsg.bytes,\n                                                      static_cast<pid_t>(-1));\n        assert(socketMsg.fd != -1);\n\n        if(socketMsg.fd != g_shell.shournalRootDirFd){\n            os::dup2(socketMsg.fd, g_shell.shournalRootDirFd);\n            os::close(socketMsg.fd);\n        }\n        auto RootDirFlags = os::getFdDescriptorFlags(g_shell.shournalRootDirFd);\n        setBitIn(RootDirFlags, FD_CLOEXEC);\n        os::setFdDescriptorFlags(g_shell.shournalRootDirFd, RootDirFlags);\n\n        autocloseSocket1.setEnabled(false);\n        if(sockets[1] != g_shell.shournalSocketNb ){\n            // dup2 and close orig\n            try {\n                os::dup2(sockets[1], g_shell.shournalSocketNb);\n                close(sockets[1]);\n            } catch (const os::ExcOs& ex) {\n                logCritical << \"duplicating to shournal-wait-fd failed: \"\n                            << ex.what();\n                close(sockets[1]);\n                return false;\n            }\n        }\n        g_shell.shournalSocket.setSockFd(g_shell.shournalSocketNb);\n        g_shell.shournalSockFdDescripFlags = os::getFdDescriptorFlags(g_shell.shournalSocketNb);\n\n        g_shell.watchState = E_WatchState::WITHIN_CMD;\n        shell_logger::flushBufferdMessages();\n\n        return true;\n    } catch(const os::ExcOs& ex){\n        logCritical << ex.what();\n    } catch (const std::exception& e) {\n        logCritical << \"Unknown std::exception occurred: \" << e.what() << 
\"\\n\";\n    } catch (...) {\n        logCritical << \"Unknown exception occurred\\n\";\n    }\n    g_shell.shournalSocket.setSockFd(-1);\n    g_shell.shournalRootDirFd = -1;\n    return false;\n}\n\n\n\n/// If the environment variable '_LIBSHOURNAL_TRIGGER' is set,\n/// perform the set action (load settings, launch external shournal, etc.).\n/// @param success If a valid shell request occured AND was successful this\n/// variable is set to true.\n/// @return the request which occurred or TRIGGER_UNSET/TRIGGER_MALFORMED.\nShellRequest shell_request_handler::checkForTriggerAndHandle(bool *success){\n    *success = false;\n    ShellRequest request = readCheckShellUpdateRequest();\n    switch (request) {\n    case ShellRequest::TRIGGER_UNSET:\n    case ShellRequest::TRIGGER_MALFORMED:\n        return request;\n    default: break;\n    }\n\n    // Interrupt protect mostly applies to waiting for a shournal response, which is\n    // still short enough to justify not being interruptible.\n    InterruptProtect ip(SIGINT);\n\n    auto& g_shell = ShellGlobals::instance();\n\n    if(g_shell.pAttchedShell == nullptr){\n        // not initialized yet: only allow some requests:\n        switch (request) {\n        case ShellRequest::ENABLE:\n        case ShellRequest::PRINT_VERSION:\n        case ShellRequest::UPDATE_VERBOSITY:\n            break;\n        default:\n            QIErr() << int(request) << \"occurred, although the \"\n                    \"attached shell was not initialized (bug?)\";\n            return request;\n        }\n    }\n    if(! 
updateShellPID()){\n        return ShellRequest::TRIGGER_MALFORMED;\n    }\n\n    switch (request) {\n    case ShellRequest::ENABLE:\n        *success = handleEnableRequest();\n        break;\n    case ShellRequest::DISABLE:\n        *success = handleDisableRequest();\n        break;\n    case ShellRequest::PREPARE_CMD:\n        *success = handlePrepareCmd();\n        break;\n    case ShellRequest::CLEANUP_CMD:\n        *success = handleCleanupCmd();\n        break;\n    case ShellRequest::PRINT_VERSION:\n        QOut() << \"libshournal-shellwatch.so version \" << app::version().toString() << \"\\n\";\n        *success = true;\n        break;\n    case ShellRequest::UPDATE_VERBOSITY:\n        *success = ShellGlobals::updateVerbosityFromEnv(true);\n        break;\n    default:\n        QIErr() << \"BUG! Unhandled request occurred:\" << int(request);\n    }\n    return request;\n}\n"
  },
  {
    "path": "src/shell-integration-fanotify/shell_request_handler.h",
    "content": "#pragma once\n\n\nnamespace shell_request_handler  {\n\n/// ENABLE: shell observation enabled\n/// DISABLE: shell observation disabled\n/// PREPARE_CMD: prepare observing the next command-sequence\n/// CLEANUP_CMD: stop monitoring the command-sequence and send command-info to external shournal\n/// PRINT_VERSION: print the version of *this* shared library\n/// UPDATE_VERBOSITY: update the verbosity from environment\n/// TRIGGER_UNSET:  The trigger is not set in the environment\n/// TRIGGER_MALFORMED: The trigger is set in the environment but malformed\nenum class ShellRequest {\n                         // To be used by the shell integration-scripts\n                         ENABLE, DISABLE,\n                         PREPARE_CMD, CLEANUP_CMD,\n                         PRINT_VERSION,\n                         UPDATE_VERBOSITY,\n\n                         // Internal use in this shared library\n                         TRIGGER_UNSET,\n                         TRIGGER_MALFORMED,\n                         ENUM_END};\n\nShellRequest checkForTriggerAndHandle(bool *success);\nbool handlePrepareCmd();\n\n} // namespace shell_request_handler\n\n"
  },
  {
    "path": "src/shournal/CMakeLists.txt",
    "content": "\n\nadd_executable(${PROJECT_NAME} ../../html-export/dist/htmlexportres.qrc\n    shournal.cpp\n    argcontrol_dbdelete.cpp\n    argcontrol_dbquery.cpp\n    command_printer.cpp\n    command_printer_html.cpp\n    command_printer_human.cpp\n    command_printer_json.cpp\n    cmd_stats.cpp\n    )\n\n# To keep dependencies low, only generate the main.js\n# when developing the html export.\n# To do so, set HTML_EXPORT_DEV to ON:\n# cmake -DHTML_EXPORT_DEV:BOOL=ON\n# npm and webpack must already be installed\nif (${HTML_EXPORT_DEV})\n    add_custom_target(\n        build_htmlplot_npm\n        WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/html-export\n        COMMAND npm install --production\n        COMMAND npm run build\n        )\n    add_dependencies(${PROJECT_NAME} build_htmlplot_npm)\nendif()\n\n\n\ntarget_link_libraries(${PROJECT_NAME}\n    lib_shournal_common\n    )\n\ninstall(\n    TARGETS ${PROJECT_NAME}\n    RUNTIME DESTINATION bin\n    PERMISSIONS\n                OWNER_READ OWNER_WRITE OWNER_EXECUTE\n                GROUP_READ GROUP_EXECUTE\n                WORLD_READ WORLD_EXECUTE\n)\n"
  },
  {
    "path": "src/shournal/argcontrol_dbdelete.cpp",
    "content": "\n#include <QDebug>\n\n#include \"argcontrol_dbdelete.h\"\n#include \"argcontrol_dbquery.h\"\n#include \"qoutstream.h\"\n\n#include \"qoptargparse.h\"\n#include \"qoptsqlarg.h\"\n#include \"database/db_controller.h\"\n#include \"database/query_columns.h\"\n#include \"cpp_exit.h\"\n#include \"app.h\"\n\nusing argcontol_dbquery::addVariantSqlArgToQueryIfParsed;\nusing argcontol_dbquery::addSimpleSqlArgToQueryIfParsed;\nusing db_controller::QueryColumns;\n\n\nvoid argcontrol_dbdelete::parse(int argc, char *argv[])\n{\n    QOptArgParse parser;\n\n    parser.setHelpIntroduction(qtr(\n        \"Delete commands (and all corresponding file events) from the database by id or date.\"));\n\n    QOptSqlArg argCmdId(\"cmdid\", \"command-id\", qtr(\"Deletes command with given id.\"),\n                          {E_CompareOperator::EQ} );\n    parser.addArg(&argCmdId);\n\n    QOptSqlArg argCmdText(\"cmdtxt\", \"command-text\", qtr(\"Delete commands with matching command-string.\"),\n                        QOptSqlArg::cmpOpsText());\n    parser.addArg(&argCmdText);\n\n    QOptSqlArg argCmdCwd(\"cwd\", \"command-working-dir\",\n                         qtr(\"Delete commands with matching working-directory.\"),\n                          QOptSqlArg::cmpOpsText());\n    parser.addArg(&argCmdCwd);\n\n    QOptSqlArg argCmdDate(\"cmded\", \"command-end-date\", qtr(\"Deletes commands given by end-date. 
Example:\\n\"\n                                                  \"%1 --delete --command-end-date -between \"\n                                                  \"2019-04-01 2019-04-02\\n\"\n                                                  \"deletes all commands which finished between \"\n                                                  \"the first and second of April 2019.\").arg(app::SHOURNAL),\n                          QOptSqlArg::cmpOpsAllButLike() );\n    parser.addArg(&argCmdDate);\n\n    QOptArg argCmdOlderThan(\"\", \"older-than\", qtr(\"Delete commands older than the given number of \"\n                                                  \"years, months, days, etc.. Example:\\n\"\n                                                  \"%1 --delete --older-than 3y\\n\"\n                                                  \"deletes all commands which were executed more than \"\n                                                  \"three years ago.\").arg(app::SHOURNAL));\n    argCmdOlderThan.setIsRelativeDateTime(true, true);\n    parser.addArg(&argCmdOlderThan);\n\n    QOptArg argCmdYoungerThan(\"\", \"younger-than\", qtr(\"Delete commands younger than the given number of \"\n                                                  \"years, months, days, etc.. 
Example:\\n\"\n                                                  \"%1 --delete --younger-than 1h\\n\"\n                                                  \"deletes all commands which were executed within \"\n                                                      \"the last hour.\").arg(app::SHOURNAL));\n    argCmdYoungerThan.setIsRelativeDateTime(true, true);\n    parser.addArg(&argCmdYoungerThan);\n\n\n    parser.parse(argc, argv);\n    SqlQuery query;\n\n    auto & cols = QueryColumns::instance();\n\n    addVariantSqlArgToQueryIfParsed<qint64>(query, argCmdId, cols.cmd_id);\n    addVariantSqlArgToQueryIfParsed<QDateTime>(query, argCmdDate, cols.cmd_endtime);\n    addSimpleSqlArgToQueryIfParsed<QString>(query, argCmdText, cols.cmd_txt);\n    addSimpleSqlArgToQueryIfParsed<QString>(query, argCmdCwd, cols.cmd_workingDir);\n\n    if(argCmdOlderThan.wasParsed()){\n        auto olderThanDates = argCmdOlderThan.getVariantRelativeDateTimes();\n        query.addWithAnd(cols.cmd_starttime, olderThanDates, E_CompareOperator::LT );\n    }\n    if(argCmdYoungerThan.wasParsed()){\n        auto youngerThanDates = argCmdYoungerThan.getVariantRelativeDateTimes();\n        query.addWithAnd(cols.cmd_starttime, youngerThanDates, E_CompareOperator::GT );\n    }\n\n    if( parser.rest().len != 0){\n        QIErr() << qtr(\"Invalid parameters passed: %1.\\n\"\n                       \"Show help with --delete --help\").\n                   arg(argvToQStr(parser.rest().len, parser.rest().argv));\n        cpp_exit(1);\n    }\n\n    if(query.isEmpty()){\n        QIErr() << qtr(\"No target fields given (empty query).\");\n        cpp_exit(1);\n    }\n\n    QOut() << qtr(\"%1 command(s) deleted.\").arg( db_controller::deleteCommand(query)) << \"\\n\";\n\n\n    cpp_exit(0);\n}\n"
  },
  {
    "path": "src/shournal/argcontrol_dbdelete.h",
    "content": "#pragma once\n\n\nnamespace argcontrol_dbdelete {\n\n[[noreturn]]\nvoid parse(int argc, char *argv[]);\n\n}\n"
  },
  {
    "path": "src/shournal/argcontrol_dbquery.cpp",
    "content": "#include <QDebug>\n#include <cassert>\n#include <unistd.h>\n\n#include \"argcontrol_dbquery.h\"\n\n#include \"qoptargparse.h\"\n#include \"qoptsqlarg.h\"\n#include \"database/db_globals.h\"\n#include \"database/db_controller.h\"\n#include \"database/query_columns.h\"\n#include \"database/file_query_helper.h\"\n#include \"database/db_conversions.h\"\n#include \"app.h\"\n#include \"logger.h\"\n#include \"qoutstream.h\"\n#include \"cpp_exit.h\"\n#include \"command_printer.h\"\n#include \"command_printer_human.h\"\n#include \"command_printer_json.h\"\n#include \"command_printer_html.h\"\n#include \"console_dialog.h\"\n#include \"osutil.h\"\n#include \"translation.h\"\n#include \"conversions.h\"\n\nusing translation::TrSnippets;\n\nusing db_controller::QueryColumns;\n\n\n[[noreturn]]\nstatic void\nqueryCmdPrintAndExit(std::unique_ptr<CommandPrinter>& cmdPrinter,\n                          SqlQuery& sqlQ,\n                          bool reverseResultIter ){\n    auto results = db_controller::queryForCmd(sqlQ, reverseResultIter);\n    cmdPrinter->printCommandInfosEvtlRestore(results);\n    cpp_exit(0);\n}\n\n[[noreturn]]\nstatic void\nrestoreSingleReadFile(QOptArg& argRestoreRfileId){\n    auto fReadInfo = db_controller::queryReadInfo_byId(\n                static_cast<qint64>(argRestoreRfileId.getValue<uint64_t>())\n                );\n    if(fReadInfo.idInDb == db::INVALID_INT_ID){\n        QIErr() << qtr(\"cannot restore file - no database-entry exists\");\n        cpp_exit(1);\n    }\n    if(! fReadInfo.isStoredToDisk){\n        QIErr() << qtr(\"cannot restore file %1 - only meta-information (path, name, etc.) 
\"\n                       \"about the file is stored in the database but not the \"\n                       \"file itself.\").arg(fReadInfo.name);\n        cpp_exit(1);\n    }\n\n    QDir currentDir = QDir::current();\n    if(QFile::exists(currentDir.absoluteFilePath(fReadInfo.name)) &&\n       osutil::isTTYForegoundProcess(STDIN_FILENO) &&\n       ! console_dialog::yesNo(qtr(\"File %1 exists. Replace?\").arg(fReadInfo.name)) ) {\n        cpp_exit(0);\n    }\n    StoredFiles().restoreReadFileAtDIr(fReadInfo, currentDir);\n    QOut() << qtr(\"File '%1' restored at current working directory.\").arg(fReadInfo.name) << \"\\n\";\n    cpp_exit(0);\n}\n\nstatic void addFileQuery(SqlQuery &query, const QOptArg& argFile,\n                         const QOptArg& argTakeFromFile, bool readFile){\n    SqlQuery fQuery;\n    if(argTakeFromFile.wasParsed()){\n        bool mtime=false;\n        bool hash=false;\n        bool size=false;\n        for(auto opt : argTakeFromFile.getOptions()){\n            switch(opt[0].toLatin1()){\n            case 'm': mtime = true; break;\n            case 'h': hash = true; break;\n            case 's': size = true; break;\n            default: throw QExcProgramming(\"Bad \"+argFile.name()+\" option: \"+opt);\n            }\n        }\n        fQuery = file_query_helper::buildFileQuery(argFile.getValue<QString>(),\n                                      readFile, mtime, hash, size);\n    } else {\n        fQuery = file_query_helper::buildFileQuerySmart(\n                    argFile.getValue<QString>(), readFile);\n    }\n    query.addWithAnd(fQuery);\n}\n\n\n\nvoid argcontol_dbquery::addBytesizeSqlArgToQueryIfParsed(SqlQuery &query, QOptSqlArg &arg,\n                                                         const QString &tableCol)\n{\n    if(! 
arg.wasParsed()) return;\n\n    query.addWithAnd(tableCol, arg.getVariantByteSizes(), arg.parsedOperator() );\n}\n\nvoid argcontol_dbquery::parse(int argc, char *argv[])\n{\n    QOptArgParse parser;\n    const std::unordered_set<QString> &TAKE_FROM_FILE_OPTIONS {\n        \"mtime\", \"hash\", \"size\"};\n\n    parser.setHelpIntroduction(qtr(\n        \"Query the command/file-database for several parameters which are\\n\"\n        \"AND-connected. For several fields optional comparison-operators are supported.\\n\"\n        \"The operators are passed in shell-friendly syntax so e.g. \"\n        \"-gt stands for 'greater than'.\\n\"\n        \"-like will allow for using sql wildcards (e.g. '%').\\n\"\n        \"Examples:\\n\"\n        \"%1 --query --wfile /tmp/foo123 - use existing file to find out, how it was created.\\n\"\n        \"%1 --query --wsize -gt 10KiB - print all commands which have written to files whose \"\n                                    \"size is greater than 10KiB.\\n\"\n        \"%1 --query --wpath -like /home/user% - print all commands, which have written to files \"\n                                     \"below /home/user and all subdirectories.\\n\"\n                                   ).arg(app::SHOURNAL) + \"\\n\");\n\n    QOptArg argHistory(\"\", \"history\",\n                            qtr(\"Only display the last N commands, you may optionally \"\n                                \"filter by other parameters as well (like command-text)\")\n                            );\n    parser.addArg(&argHistory);\n\n    // ------------ wfile\n    QOptArg argWFile(\"wf\", \"wfile\",\n                    qtr(\"Pass an existing file(-path) to find out the command, \"\n                        \"which caused the creation/modification of a given file \"\n                        \"(wfile stands for 'written file'). 
Per default the query is performed on \"\n                        \"the basis of hash(es), mtime and size.\" ));\n    parser.addArg(&argWFile);\n\n    QOptArg argTakeFromWFile(\"\", \"take-from-wfile\",\n                            qtr(\"Specify explicitly which properties to collect \"\n                                \"from the given file passed via %1. \"\n                                \"Typically you do not need this.\").arg(argWFile.name())\n                            );\n    argTakeFromWFile.addRequiredArg(&argWFile);\n    argTakeFromWFile.setAllowedOptions(TAKE_FROM_FILE_OPTIONS);\n    parser.addArg(&argTakeFromWFile);\n\n    const QString wFilePreamble = qtr(\"Query for files written to \");\n    QOptSqlArg argWName(\"wn\", \"wname\", wFilePreamble + qtr(\"by filename.\"),\n                        QOptSqlArg::cmpOpsText());\n    parser.addArg(&argWName);\n\n    QOptSqlArg argWPath(\"wp\", \"wpath\", wFilePreamble + qtr(\"by (full) directory-path.\"),\n                        QOptSqlArg::cmpOpsText(), E_CompareOperator::LIKE);\n    parser.addArg(&argWPath);\n\n    QOptSqlArg argWSize(\"ws\", \"wsize\", wFilePreamble + qtr(\"by filesize.\"),\n                       QOptSqlArg::cmpOpsAllButLike() );\n    argWSize.setIsByteSizeArg(true);\n    parser.addArg(&argWSize);\n\n    QOptSqlArg argWHash(\"wh\", \"whash\", wFilePreamble + qtr(\"by hash.\"),\n                        QOptSqlArg::cmpOpsEqNe() );\n    parser.addArg(&argWHash);\n\n    QOptSqlArg argWMtime(\"wm\", \"wmtime\", wFilePreamble + qtr(\"by mtime.\"),\n                       QOptSqlArg::cmpOpsAllButLike() );\n    parser.addArg(&argWMtime);\n\n    // ------------ rfile\n    QOptArg argRFile(\"rf\", \"rfile\",\n                    qtr(\"Pass an existing file(-path) to find out the command(s), \"\n                        \"which read from it \"\n                        \"(rfile stands for 'read file'). 
Per default the query is performed on \"\n                        \"the basis of hash(es), mtime and size.\" ));\n    parser.addArg(&argRFile);\n\n    QOptArg argTakeFromRFile(\"\", \"take-from-rfile\",\n                            qtr(\"Specify explicitly which properties to collect \"\n                                \"from the given file passed via %1. \"\n                                \"Typically you do not need this.\").arg(argRFile.name())\n                            );\n    argTakeFromRFile.addRequiredArg(&argRFile);\n    argTakeFromRFile.setAllowedOptions(TAKE_FROM_FILE_OPTIONS);\n    parser.addArg(&argTakeFromRFile);\n\n\n    const QString rFilePreamble = qtr(\"Query for read files \");\n    QOptSqlArg argRName(\"rn\", \"rname\", rFilePreamble + qtr(\"by filename.\"),\n                        QOptSqlArg::cmpOpsText());\n    parser.addArg(&argRName);\n\n    QOptSqlArg argRPath(\"rp\", \"rpath\", rFilePreamble + qtr(\"by (full) directory-path.\"),\n                        QOptSqlArg::cmpOpsText(), E_CompareOperator::LIKE);\n    parser.addArg(&argRPath);\n\n    QOptSqlArg argRSize(\"rs\", \"rsize\", rFilePreamble + qtr(\"by filesize.\"),\n                       QOptSqlArg::cmpOpsAllButLike() );\n    argRSize.setIsByteSizeArg(true);\n    parser.addArg(&argRSize);\n\n    QOptSqlArg argRHash(\"rh\", \"rhash\", rFilePreamble + qtr(\"by hash.\"),\n                        QOptSqlArg::cmpOpsEqNe() );\n    parser.addArg(&argRHash);\n\n    QOptSqlArg argRMtime(\"rm\", \"rmtime\", rFilePreamble + qtr(\"by mtime.\"),\n                       QOptSqlArg::cmpOpsAllButLike() );\n    parser.addArg(&argRMtime);\n\n    QOptArg argMaxReadFileLines(\"\", \"max-rfile-lines\",\n                            qtr(\"Display at most the first N lines for each \"\n                                \"read file.\")\n                            );\n    parser.addArg(&argMaxReadFileLines);\n\n    QOptArg argRestoreRfiles(\"\", \"restore-rfiles\",\n                            qtr(\"Restore 
read files for the found commands at the system's \"\n                                \"temporary directory.\"),\n                             false\n                            );\n    parser.addArg(&argRestoreRfiles);\n\n    QOptArg argRestoreRfilesAt(\"\", \"restore-rfiles-at\",\n                             qtr(\"Restore read files for the found commands at the given \"\n                                 \"path.\")\n                             );\n    parser.addArg(&argRestoreRfilesAt);\n\n    QOptArg argRestoreRfileId(\"\", \"restore-rfile-id\",\n                             qtr(\"Restore the read file with the given id at the working directory. \"\n                                 \"Please note that id's are not necessarily in \"\n                                 \"an ascending order.\")\n                             );\n    parser.addArg(&argRestoreRfileId);\n\n    // ------------ cmd\n\n    QOptSqlArg argCmdText(\"cmdtxt\", \"command-text\", qtr(\"Query for commands with matching command-string.\"),\n                        QOptSqlArg::cmpOpsText(), E_CompareOperator::LIKE);\n    parser.addArg(&argCmdText);\n\n    QOptSqlArg argCmdCwd(\"cwd\", \"command-working-dir\",\n                         qtr(\"Query for commands with matching working-directory.\"),\n                          QOptSqlArg::cmpOpsText(), E_CompareOperator::LIKE);\n    parser.addArg(&argCmdCwd);\n\n    QOptSqlArg argCmdId(\"cmdid\", \"command-id\", qtr(\"Query for commands with matching ids. 
\"\n                                                   \"Please note that id's are not necessarily in \"\n                                                   \"an ascending order.\"),\n                        QOptSqlArg::cmpOpsAllButLike());\n    parser.addArg(&argCmdId);\n\n    QOptSqlArg argCmdEndDate(\"cmded\", \"command-end-date\", qtr(\"Query for commands based on \"\n                                                              \"the date (time) they finished.\"),\n                        QOptSqlArg::cmpOpsAllButLike());\n    parser.addArg(&argCmdEndDate);\n\n    // ------------\n\n    QOptSqlArg argShellSessionId(\"sid\", \"shell-session-id\",\n                                 qtr(\"Query for all commands with a given shell-session-id.\"),\n                        QOptSqlArg::cmpOpsEqNe());\n    parser.addArg(&argShellSessionId);\n\n    const uint DEFAULT_wfilesMaxCount = 10;\n    QOptArg argWfilesMaxCount(\"wfc\", \"wfiles-max-count\",\n                              qtr(\"Limit the number of rendered written files \"\n                                  \"per command (default is %1)\").arg(DEFAULT_wfilesMaxCount));\n    parser.addArg(&argWfilesMaxCount);\n\n    const uint DEFAULT_rfilesMaxCount = 10;\n    QOptArg argRfilesMaxCount(\"rfc\", \"rfiles-max-count\",\n                              qtr(\"Limit the number of rendered read files \"\n                                  \"per command (default is %1)\").arg(DEFAULT_rfilesMaxCount));\n    parser.addArg(&argRfilesMaxCount);\n\n    QOptArg argOutputFile(\"o\", \"output\",\n                          qtr(\"Specify an output file where the report \"\n                              \"is written to. Otherwise it is printed \"\n                              \"to stdout\"));\n    parser.addArg(&argOutputFile);\n\n    QOptArg argOutputFormat(\"\", \"output-format\",\n                            qtr(\"Specify the output format (human is default). 
\"\n                                \"If 'html' is used, %1 must also be specified\")\n                            .arg(argOutputFile.name())); \n\n    const char* OUTPUT_FORMAT_HUMAN = \"human\";\n    argOutputFormat.setAllowedOptions({OUTPUT_FORMAT_HUMAN, \"json\", \"html\"});\n    parser.addArg(&argOutputFormat);\n\n    QOptArg argStatCounts(\"\", \"stat-counts\",\n                          qtr(\"Specify the min. and max. number of entries \"\n                              \"for the overall statistics (e.g. commands with most file modifications) \"\n                              \"as a comma-separated list, e.g. '5,10' to display at least 5 but not more than\"\n                              \"10 entries.\"));\n    parser.addArg(&argStatCounts);\n\n    QOptArg argFileStat(\"\", \"stat\",\n                        qtr(\"Report the current status of files compared to the \"\n                            \"database as U (up to date), \"\n                            \"M (modified), N (not exist) ERROR (in case of an error) or \"\n                            \"NA (not queried, only using json).\"), false);\n    parser.addArg(&argFileStat);\n\n\n    // --------------------- End of Args -----------------------\n\n    parser.parse(argc, argv);\n\n    auto & trSnips = TrSnippets::instance();\n\n    SqlQuery query;\n\n    std::unique_ptr<CommandPrinter> cmdPrinter;\n    if(argOutputFormat.wasParsed()){\n        switch(argOutputFormat.getOptions(1).first()[1].toLatin1()){\n        case 'u': cmdPrinter = std::unique_ptr<CommandPrinter>(new CommandPrinterHuman); break;\n        case 's': cmdPrinter = std::unique_ptr<CommandPrinter>(new CommandPrinterJson);break;\n        case 't': cmdPrinter = std::unique_ptr<CommandPrinter>(new CommandPrinterHtml);break;\n        default: throw QExcProgramming(\"Bad output format:\" + argOutputFormat.getOptions(1).first());\n        }\n    } else {\n        cmdPrinter = std::unique_ptr<CommandPrinter>(new CommandPrinterHuman);\n    }\n    
cmdPrinter->setQueryString(argvToQStr(argc, argv));\n\n    cmdPrinter->setMaxCountWfiles(argWfilesMaxCount.getValue<uint>(DEFAULT_wfilesMaxCount));\n    cmdPrinter->setMaxCountRfiles(argRfilesMaxCount.getValue<uint>(DEFAULT_rfilesMaxCount));\n    {\n        const auto statCounts = argStatCounts.getValuesByDelim<QVector<uint> >(\",\", {5,5}, 2,2);\n        if(statCounts[0] > statCounts[1]){\n            throw ExcOptArgParse(qtr(\"argument %1: min. cannot be greater than max. stat-count\")\n                                 .arg(argStatCounts.name()));\n        }\n        cmdPrinter->setMinCountOfStats(statCounts[0]);\n        cmdPrinter->cmdStats().setMaxCountOfStats(statCounts[1]);\n    }\n\n    if(argOutputFile.wasParsed()){\n        cmdPrinter->outputFile().setFileName(argOutputFile.getValue<QString>());\n    } else {\n        if(dynamic_cast<CommandPrinterHtml*>(cmdPrinter.get()) != nullptr){\n            QIErr() << qtr(\"For html-reports, please specify an output file \"\n                           \"(arg %1).\").arg(argOutputFile.name());\n            cpp_exit(1);\n        }\n        cmdPrinter->outputFile().open(stdout, QFile::OpenModeFlag::WriteOnly);\n    }\n\n\n    if(argMaxReadFileLines.wasParsed()){\n        auto cmdPrinterHuman = dynamic_cast<CommandPrinterHuman*>(cmdPrinter.get());\n        if(cmdPrinterHuman == nullptr){\n            QIErr() << qtr(\"Argument %1 is only allowed with output-format '%2'\")\n                       .arg(argMaxReadFileLines.name(), OUTPUT_FORMAT_HUMAN);\n            cpp_exit(1);\n        }\n        cmdPrinterHuman->setMaxCountOfReadFileLines(int(argMaxReadFileLines.getValue<uint>(5)));\n    }\n    cmdPrinter->setRestoreReadFiles(argRestoreRfiles.wasParsed() || argRestoreRfilesAt.wasParsed());\n\n    if(argRestoreRfilesAt.wasParsed()){\n        QDir restoreDir(argRestoreRfilesAt.getValue<QString>());\n        if(! 
restoreDir.exists()){\n            QIErr() << qtr(\"Restore directory %1 does not exist.\").arg(restoreDir.absolutePath());\n            cpp_exit(1);\n        }\n        restoreDir.setPath(pathJoinFilename(restoreDir.absolutePath(), trSnips.shournalRestore));\n        cmdPrinter->setRestoreDir(restoreDir);\n    }  \n\n    if(argRestoreRfileId.wasParsed()){\n        restoreSingleReadFile(argRestoreRfileId);\n    }\n\n    QueryColumns & cols = QueryColumns::instance();\n\n    addSimpleSqlArgToQueryIfParsed<QString>(query, argWName, cols.wFile_name);\n    addSimpleSqlArgToQueryIfParsed<QString>(query, argWPath, cols.wFile_path);\n    addBytesizeSqlArgToQueryIfParsed(query, argWSize, cols.wFile_size);\n    if(argWHash.wasParsed()){\n        HashValue hashVal(argWHash.getValue<uint64_t>());\n        query.addWithAnd(cols.wFile_hash, db_conversions::fromHashValue(hashVal),\n                         argWHash.parsedOperator());\n    }\n    addVariantSqlArgToQueryIfParsed<QDateTime>(query, argWMtime, cols.wFile_mtime);\n\n\n    addSimpleSqlArgToQueryIfParsed<QString>(query, argRName, cols.rFile_name);\n    addSimpleSqlArgToQueryIfParsed<QString>(query, argRPath, cols.rFile_path);\n    addBytesizeSqlArgToQueryIfParsed(query, argRSize, cols.rFile_size);\n    addVariantSqlArgToQueryIfParsed<QDateTime>(query, argRMtime, cols.rFile_mtime);\n    if(argRHash.wasParsed()){\n        HashValue hashVal(argRHash.getValue<uint64_t>());\n        query.addWithAnd(cols.rFile_hash, db_conversions::fromHashValue(hashVal),\n                         argRHash.parsedOperator());\n    }\n\n    addVariantSqlArgToQueryIfParsed<qint64>(query, argCmdId, cols.cmd_id);\n    addSimpleSqlArgToQueryIfParsed<QString>(query, argCmdText, cols.cmd_txt);\n    addSimpleSqlArgToQueryIfParsed<QString>(query, argCmdCwd, cols.cmd_workingDir);\n    addVariantSqlArgToQueryIfParsed<QDateTime>(query, argCmdEndDate, cols.cmd_endtime);\n\n    if(argShellSessionId.wasParsed()){\n        auto shellSessionUUID = 
QByteArray::fromBase64(argShellSessionId.getValue<QByteArray>());\n        query.addWithAnd(cols.session_id, shellSessionUUID,\n                         argShellSessionId.parsedOperator());\n    }\n\n    if(argWFile.wasParsed()){\n        addFileQuery(query, argWFile, argTakeFromWFile, false);\n    }\n    if(argRFile.wasParsed()){\n        addFileQuery(query, argRFile, argTakeFromRFile, true);\n    }\n\n    if(argFileStat.wasParsed()){\n        cmdPrinter->setReportFileStatus(true);\n    }\n\n\n    // we always display commands in startDate-order, however,\n    // to allow for a performant history query (where the last\n    // N entries are queried) we traverse the result-set from\n    // end -> reverseResultIter = true AND query.ascending = false.\n    bool reverseResultIter=false;\n\n    // argHistory *must* be last, in case of an otherwise empty\n    // query, accept all (where 1).\n    if(argHistory.wasParsed()){\n        reverseResultIter = true;\n        query.setAscending(false);\n        query.setLimit(static_cast<int>(argHistory.getValue<uint>()));\n        if(query.isEmpty()){\n            // accept everything\n            query.setQuery(\" 1 \");\n        }\n    }\n\n    if( parser.rest().len != 0){\n        QIErr() << qtr(\"Invalid parameters passed: «%1».\\n\"\n                       \"Show help with --query --help\").\n                   arg(argvToQStr(parser.rest().len, parser.rest().argv));\n        cpp_exit(1);\n    }\n\n    if(query.isEmpty()){\n        QIErr() << qtr(\"No target fields given (empty query).\");\n        cpp_exit(1);\n    }\n\n    queryCmdPrintAndExit(cmdPrinter, query, reverseResultIter);\n}\n\n\n"
  },
  {
    "path": "src/shournal/argcontrol_dbquery.h",
    "content": "#pragma once\n\n#include \"sqlquery.h\"\n#include \"qoptsqlarg.h\"\n\nnamespace argcontol_dbquery {\n    [[noreturn]]\n    void parse(int argc, char *argv[]);\n\n    template <class T>\n    void addSimpleSqlArgToQueryIfParsed(SqlQuery& query, QOptSqlArg& arg, const QString& tableCol);\n\n    template <class T>\n    void addVariantSqlArgToQueryIfParsed(SqlQuery& query,\n                                         QOptSqlArg& arg, const QString& tableCol);\n\n    void addBytesizeSqlArgToQueryIfParsed(SqlQuery& query,\n                                         QOptSqlArg& arg, const QString& tableCol);\n}\n\n\n\ntemplate <class T>\nvoid argcontol_dbquery::addSimpleSqlArgToQueryIfParsed(SqlQuery& query,\n                                                       QOptSqlArg& arg, const QString& tableCol){\n    if(! arg.wasParsed()){\n        return;\n    }\n\n    query.addWithAnd(tableCol,\n                     arg.getValue<T>(),\n                     arg.parsedOperator() );\n}\n\ntemplate <class T>\nvoid argcontol_dbquery::addVariantSqlArgToQueryIfParsed(SqlQuery& query,\n                                                       QOptSqlArg& arg, const QString& tableCol){\n    if(! arg.wasParsed()){\n        return;\n    }\n\n    query.addWithAnd(tableCol,\n                     arg.getVariantValues<T>(),\n                     arg.parsedOperator() );\n}\n"
  },
  {
    "path": "src/shournal/cmd_stats.cpp",
    "content": "#include \"cmd_stats.h\"\n\n#include \"cleanupresource.h\"\n\n/// Do not collect more than that many entries of each category\nCmdStats::CmdStats() :\n    m_maxCountOfStats(5)\n{\n    m_cmdsWithMostFileModsQueue.setMaxSize(m_maxCountOfStats);\n}\n\nvoid CmdStats::setMaxCountOfStats(const int &val)\n{\n    m_maxCountOfStats = val;\n    m_cmdsWithMostFileModsQueue.setMaxSize(val);\n}\n\nvoid CmdStats::collectCmd(const CommandInfo &cmd)\n{\n    auto incrementIdxLater = finally([this] { ++m_currentCmdIdx; });\n\n    if(! cmd.fileWriteInfos.isEmpty()){\n        MostFileModsEntry mostFileMods;\n        mostFileMods.idx = m_currentCmdIdx;\n        mostFileMods.idInDb = cmd.idInDb;\n        mostFileMods.cmdTxt = cmd.text;\n        mostFileMods.countOfFileMods = cmd.fileWriteInfos.size();\n        m_cmdsWithMostFileModsQueue.push(mostFileMods);\n    }\n\n    if(! cmd.sessionInfo.uuid.isNull()){\n        auto & el = m_sessionMostCmdsMap[cmd.sessionInfo.uuid];\n        if(el.idx == -1){\n            // remember the first cmd in this session\n            el.idx = m_currentCmdIdx;\n            el.idInDb = cmd.idInDb;\n        }\n        el.cmdUuid = cmd.sessionInfo.uuid;\n        ++el.cmdCount;\n    }\n\n    {\n        auto & cwdCmdCountEntry = m_cwdCmdCountMap[cmd.workingDirectory];\n        ++cwdCmdCountEntry.cmdCount;\n    }\n\n    for(const auto& info : cmd.fileReadInfos){\n        auto & dirIoEntry = m_dirIoCountMap[info.path];\n        ++dirIoEntry.readCount;\n    }\n    for(const auto& info : cmd.fileWriteInfos){\n        auto & dirIoEntry = m_dirIoCountMap[info.path];\n        ++dirIoEntry.writeCount;\n    }\n}\n\n/// aggregate the collected commands -> meant to be called, after all commands\n/// were collected. 
This function may be called only once as it clears\n/// afterwards not needed data.\nvoid CmdStats::eval()\n{\n    m_cmdsWithMostFileMods = m_cmdsWithMostFileModsQueue.popAll<MostFileModsEntrys>(true);\n\n    limited_priority_queue<SessionMostCmdsEntry,\n                           SessionMostCmds,\n                           cmpSessionMostCmdEntry> sessionMostCmdsPq;\n    sessionMostCmdsPq.setMaxSize(m_maxCountOfStats);\n    for(const auto & el : m_sessionMostCmdsMap){\n        sessionMostCmdsPq.push(el);\n    }\n\n    m_sessionMostCmds = sessionMostCmdsPq.popAll<SessionMostCmds>(true);\n    m_sessionMostCmdsMap.clear();\n\n\n    limited_priority_queue<CwdCmdCount, CwdCmdCounts, cmpCwdCmdCount> cwdCmdCountQueue;\n    cwdCmdCountQueue.setMaxSize(m_maxCountOfStats);\n    for(auto it=m_cwdCmdCountMap.begin(); it != m_cwdCmdCountMap.end(); ++it){\n        // was not yet assigned because here it has to be assigned only once per\n        // working dir\n        it.value().workingDir = it.key();\n        cwdCmdCountQueue.push(it.value());\n    }\n    m_cwdCmdCounts = cwdCmdCountQueue.popAll<CwdCmdCounts>(true);\n    m_cwdCmdCountMap.clear();\n\n\n    limited_priority_queue<DirIoCount, DirIoCounts, cmpDirIoCount> dirIoCountQueue;\n    dirIoCountQueue.setMaxSize(m_maxCountOfStats);\n    for(auto it=m_dirIoCountMap.begin(); it != m_dirIoCountMap.end(); ++it){\n        it.value().dir = it.key();\n        dirIoCountQueue.push(it.value());\n    }\n    m_dirIoCounts = dirIoCountQueue.popAll<DirIoCounts>(true);\n    m_dirIoCountMap.clear();\n}\n\nconst CmdStats::MostFileModsEntrys &CmdStats::cmdsWithMostFileMods() const\n{\n    return m_cmdsWithMostFileMods;\n}\n\nconst CmdStats::SessionMostCmds &CmdStats::sessionMostCmds() const\n{\n    return m_sessionMostCmds;\n}\n\nconst CmdStats::CwdCmdCounts &CmdStats::cwdCmdCounts() const\n{\n    return m_cwdCmdCounts;\n}\n\nconst CmdStats::DirIoCounts &CmdStats::dirIoCounts() const\n{\n    return m_dirIoCounts;\n}\n"
  },
  {
    "path": "src/shournal/cmd_stats.h",
    "content": "#pragma once\n\n#include <QVector>\n#include <QHash>\n\n#include \"commandinfo.h\"\n#include \"limited_priority_queue.h\"\n\nclass CmdStats\n{\npublic:\n    // Commands which modified the most files\n    struct MostFileModsEntry {\n        int idx; // zero based collectCmd index (first command -> 0...)\n        qint64 idInDb;\n        QString cmdTxt;\n        int countOfFileMods;\n    };\n\n    // Sessions where the most commands where executed in\n    struct SessionMostCmdsEntry {\n        int idx {-1}; // idx of the first cmd of this session\n        qint64 idInDb{-1}; // id of the first cmd of this session\n        int cmdCount{0}; // number of commands executed in this session\n        QByteArray cmdUuid;\n    };\n\n    // Count of commands executed in\n    // a specific CurrentWorkingDirectory\n    struct CwdCmdCount {\n        QString workingDir;\n        int cmdCount{0};\n    };\n\n    // Directories, where the most files were read and written\n    struct DirIoCount {\n        QString dir;\n        qint64 readCount{0};\n        qint64 writeCount{0};\n    };\n\n    typedef QVector<MostFileModsEntry> MostFileModsEntrys;\n    typedef QVector<SessionMostCmdsEntry> SessionMostCmds;\n    typedef QVector<CwdCmdCount> CwdCmdCounts;\n    typedef QVector<DirIoCount> DirIoCounts;\n\npublic:\n\n    CmdStats();\n\n    void setMaxCountOfStats(const int &val);\n\n    void collectCmd(const CommandInfo& cmd);\n\n    void eval();\n\n    const MostFileModsEntrys& cmdsWithMostFileMods() const;\n\n    const SessionMostCmds& sessionMostCmds() const;\n\n    const CwdCmdCounts& cwdCmdCounts() const;\n\n    const DirIoCounts& dirIoCounts() const;\n\nprivate:\n    struct cmpFileModEntry {\n        bool operator()(const MostFileModsEntry & e1, const MostFileModsEntry & e2) {\n            return e1.countOfFileMods > e2.countOfFileMods;\n        }\n    };\n\n    struct cmpSessionMostCmdEntry {\n        bool operator()(const SessionMostCmdsEntry & e1, const 
SessionMostCmdsEntry & e2) {\n            return e1.cmdCount > e2.cmdCount;\n        }\n    };\n\n    struct cmpCwdCmdCount {\n        bool operator()(const CwdCmdCount & e1, const CwdCmdCount & e2) {\n            return e1.cmdCount > e2.cmdCount;\n        }\n    };\n\n    struct cmpDirIoCount {\n        bool operator()(const DirIoCount & e1, const DirIoCount & e2) {\n            return e1.readCount + e1.writeCount > e2.readCount + e2.writeCount;\n        }\n    };\n    limited_priority_queue<MostFileModsEntry,\n                           MostFileModsEntrys,\n                           cmpFileModEntry> m_cmdsWithMostFileModsQueue;\n    MostFileModsEntrys m_cmdsWithMostFileMods;\n\n    QHash<QByteArray, SessionMostCmdsEntry> m_sessionMostCmdsMap;\n    SessionMostCmds m_sessionMostCmds;\n\n    QHash<QString, CwdCmdCount> m_cwdCmdCountMap;\n    CwdCmdCounts m_cwdCmdCounts;\n\n    QHash<QString, DirIoCount> m_dirIoCountMap;\n    DirIoCounts m_dirIoCounts;\n\n\n    int m_maxCountOfStats;\n    int m_currentCmdIdx{0};\n};\n\n"
  },
  {
    "path": "src/shournal/command_printer.cpp",
    "content": "\n#include <sys/ioctl.h>\n#include <cstdio>\n#include <unistd.h>\n\n#include <QDebug>\n#include <QDir>\n#include <QStandardPaths>\n\n#include \"command_printer.h\"\n#include \"qformattedstream.h\"\n#include \"util.h\"\n#include \"qfilethrow.h\"\n#include \"db_controller.h\"\n#include \"logger.h\"\n#include \"file_query_helper.h\"\n#include \"excos.h\"\n#include \"os.h\"\n#include \"translation.h\"\n\nusing translation::TrSnippets;\n\nstatic QString buildRestorePath(){\n    return\n      pathJoinFilename(\n        QStandardPaths::writableLocation(QStandardPaths::TempLocation),\n        TrSnippets::instance().shournalRestore + \"-\" + os::getUserName<QString>()\n        );\n}\n\n\nCommandPrinter::CommandPrinter() :\n    m_restoreDir(buildRestorePath())\n{}\n\n\nvoid CommandPrinter::createRestoreTopleveDirIfNeeded()\n{\n    if(m_countOfRestoredFiles == 0){\n        // initially create restore dir\n        if(! m_restoreDir.mkpath(m_restoreDir.absolutePath())){\n            throw QExcIo(qtr(\"Failed to create the top-level read-files restore directory at %1\")\n                                 .arg(m_restoreDir.absolutePath()), false);\n        }\n    }\n}\n\nvoid CommandPrinter::restoreReadFile_safe(const FileReadInfo &readInfo, const QString &cmdIdStr)\n{\n    QFileThrow f(m_storedFiles.mkPathStringToStoredReadFile(readInfo));\n    f.open(QFile::ReadOnly);\n    restoreReadFile_safe(readInfo, cmdIdStr, f);\n}\n\n\nvoid CommandPrinter::restoreReadFile_safe(const FileReadInfo &readInfo, const QString &cmdIdStr,\n                                  const QFile &openReadFile)\n{  \n    QDir fullDirPath(\n           pathJoinFilename(m_restoreDir.absoluteFilePath(qtr(\"command-id-\") + cmdIdStr)\n                            ,readInfo.path));\n\n    const QString failMsg(qtr(\"Failed to restore read file with id %1:\").arg(readInfo.idInDb));\n    try {\n        if(! 
fullDirPath.mkpath(fullDirPath.absolutePath())){\n            throw QExcIo(qtr(\"Failed to create the read-files restore directory for command-id %1\")\n                                 .arg(cmdIdStr));\n        }\n        m_storedFiles.restoreReadFileAtDIr(readInfo, fullDirPath, openReadFile);\n        ++m_countOfRestoredFiles;\n    } catch (const os::ExcOs& e) {\n        logWarning << failMsg << e.what();\n    } catch(const QExcIo& e){\n         logWarning << failMsg << e.descrip();\n    }\n}\n\n/// Do not output statistics, if less than 'val' entries\nvoid CommandPrinter::setMinCountOfStats(int val)\n{\n    m_minCountOfStats = val;\n}\n\nvoid CommandPrinter::setReportFileStatus(bool val)\n{\n    m_reportFileStatus = val;\n}\n\nbool CommandPrinter::reportFileStatus() const\n{\n    return m_reportFileStatus;\n}\n\nvoid CommandPrinter::setMaxCountRfiles(int maxCountRfiles)\n{\n    m_maxCountRfiles = maxCountRfiles;\n}\n\nCmdStats &CommandPrinter::cmdStats()\n{\n    return m_cmdStats;\n}\n\nvoid CommandPrinter::setMaxCountWfiles(int maxCountWfiles)\n{\n    m_maxCountWfiles = maxCountWfiles;\n}\n\nvoid CommandPrinter::setQueryString(const QString &queryString)\n{\n    m_queryString = queryString;\n}\n\n\nvoid CommandPrinter::setRestoreDir(const QDir &restoreDir)\n{\n    m_restoreDir = restoreDir;\n}\n\nQFileThrow &CommandPrinter::outputFile()\n{\n    return m_outputFile;\n}\n\n\nvoid CommandPrinter::setRestoreReadFiles(bool restoreReadFiles)\n{\n    m_restoreReadFiles = restoreReadFiles;\n}\n"
  },
  {
    "path": "src/shournal/command_printer.h",
    "content": "#pragma once\n\n#include <memory>\n\n#include \"storedfiles.h\"\n#include \"conversions.h\"\n#include \"qfilethrow.h\"\n#include \"cmd_stats.h\"\n\n\nclass CommandQueryIterator;\nclass QFormattedStream;\n\n/// Base class for command-printers (human, json).\n/// Print command-infos and corresponding file events to stdout.\n/// Since we can potentially stream only once over the sql-result,\n/// restore read files on the fly, if configured so.\nclass CommandPrinter\n{\npublic:\n    CommandPrinter();\n    virtual ~CommandPrinter() = default;\n\n    virtual void printCommandInfosEvtlRestore(std::unique_ptr<CommandQueryIterator>& cmdIter) = 0;\n    virtual void setRestoreReadFiles(bool restoreReadFiles);\n    virtual void setRestoreDir(const QDir &restoreDir);\n    virtual QFileThrow& outputFile();\n    virtual void setQueryString(const QString &queryString);\n    virtual void setMaxCountWfiles(int maxCountWfiles);\n    virtual void setMaxCountRfiles(int maxCountRfiles);\n    virtual CmdStats& cmdStats();\n    virtual void setMinCountOfStats(int val);\n    virtual void setReportFileStatus(bool val);\n    virtual bool reportFileStatus() const;\n\nprotected:\n    Q_DISABLE_COPY(CommandPrinter)\n\n    void createRestoreTopleveDirIfNeeded();\n\n    void restoreReadFile_safe(const FileReadInfo& readInfo,\n                         const QString &cmdIdStr);\n    void restoreReadFile_safe(const FileReadInfo& readInfo,\n                         const QString &cmdIdStr, const QFile &openReadFile);\n\n\n    StoredFiles m_storedFiles;\n    bool m_restoreReadFiles {false};\n    int m_countOfRestoredFiles {0};\n    QDir m_restoreDir;\n    QFileThrow m_outputFile;\n    QString m_queryString; // entered by user on commandline\n    int m_maxCountWfiles{0}; // do not print more than that number of written files per command\n    int m_maxCountRfiles{0};\n    CmdStats m_cmdStats;\n    int m_minCountOfStats;\n    bool m_reportFileStatus{false};\n};\n\n\n\n\n\n"
  },
  {
    "path": "src/shournal/command_printer_html.cpp",
    "content": "\n#include <unordered_set>\n#include <QLinkedList>\n#include <QJsonObject>\n#include <QJsonDocument>\n#include <QJsonArray>\n#include <QResource>\n#include <QTemporaryFile>\n\n#include \"command_printer_html.h\"\n#include \"command_query_iterator.h\"\n#include \"logger.h\"\n#include \"util.h\"\n#include \"cleanupresource.h\"\n#include \"stupidinject.h\"\n#include \"qresource_helper.h\"\n#include \"qoutstream.h\"\n\nusing qresource_helper::data_safe;\n\n\nvoid CommandPrinterHtml::printCommandInfosEvtlRestore(std::unique_ptr<CommandQueryIterator> &cmdIter)\n{\n    if( cmdIter->computeSize() == 0){\n        QOut() << qtr(\"No results found matching the query.\\n\");\n        return;\n    }\n\n    Q_INIT_RESOURCE(htmlexportres);\n    QResource indexHtmlResource(\"://index.html\");\n    QByteArray html_content = data_safe(indexHtmlResource);\n\n    if(! m_outputFile.isOpen()){\n        m_outputFile.open(QFile::OpenModeFlag::WriteOnly);\n    }\n    QTextStream outstream(&m_outputFile);\n\n    StupidInject inject;\n\n    QTemporaryFile tmpCmdDataFile;\n    tmpCmdDataFile.open();\n\n    FileReadInfoSet_t readFileIdSet;\n\n    inject.addInjection( \"<script src=\\\"SAMPLE_DATA.js\\\"></script>\",\n                         [this, &cmdIter, &tmpCmdDataFile, &readFileIdSet](QTextStream& outstream){\n        // json performance much better-> embed into html\n        outstream << R\"(<script id=\"commandJSON\" type=\"application/json\">)\";\n\n        outstream << \"[\";\n\n        // commands are sorted by start-date, but the last started command\n        // may end before the second to last (and so on).\n        // So keep track of the final command end date\n        QDateTime finalCommandEndDate = QDateTime::fromTime_t(0);\n        const auto queryDate = QDateTime::currentDateTime();\n        bool isFirst = true;\n        while(cmdIter->next()){\n            this->addScriptsToReadFilesSet((cmdIter->value().fileReadInfos), readFileIdSet);\n            
if(isFirst){\n                isFirst = false;\n            } else {\n                // separate json objects by comma:\n                outstream << \",\";\n            }\n\n            this->processSingleCommand(outstream, cmdIter->value(), finalCommandEndDate,\n                                       tmpCmdDataFile);\n        }\n\n        outstream << \"]\";\n\n        outstream << \"</script>\\n\";\n\n        // store the rest as plain js:\n        outstream << \"<script>\\n\";\n\n        outstream << \"const ORIGINAL_QUERY = '\" << m_queryString << \"';\\n\";\n        outstream << \"const ORIGINAL_QUERY_DATE_STR = '\"\n                  << queryDate.toString(Conversions::dateIsoFormatWithMilliseconds()) << \"';\\n\";\n\n        outstream << \"const CMD_FINAL_ENDDATE_STR = '\"\n                  << finalCommandEndDate.toString(Conversions::dateIsoFormatWithMilliseconds()) << \"';\\n\";\n        outstream << \"</script>\\n\";\n    });\n\n    inject.addInjection(\"<script src=\\\"main.js\\\"></script>\",\n                        [this, &tmpCmdDataFile, &readFileIdSet](QTextStream& outstream){\n        // currently the whole js is compiled into a single main.js file,\n        // which we inject here for convenience.\n        QResource mainJSResource(\"://main.js\");\n        QByteArray mainJsContent = data_safe(mainJSResource);\n\n        outstream << \"<script>\";\n\n        outstream << mainJsContent.data();\n\n        outstream << \"</script>\\n\";\n\n\n        // write the cmd-data of the tempfile to html\n        if(! tmpCmdDataFile.seek(0)){\n            throw QExcIo(\"Failed to seek to 0 in cmdData tmpfile: \" +  tmpCmdDataFile.errorString());\n        }\n\n        QByteArray line;\n        uint linecounter = 0;\n        while(! 
(line = tmpCmdDataFile.readLine()).isEmpty()){\n            // pop \\n\n            line.resize(line.size() - 1);\n            outstream << \"<script id=\\\"commandDataJSON\" << linecounter\n                      << R\"(\" type=\"application/json\">)\";\n            outstream << line;\n\n            outstream << \"</script>\\n\";\n            ++linecounter;\n        }\n\n        outstream << \"\\n<script>\\n\";\n\n        // write the command-statistics arrays by giving the\n        // indices of the respective commands in the original\n        // commands-array\n        m_cmdStats.eval();\n        writeStatistics(outstream);\n\n        // finally write read files:\n        writeReadFileContentsToHtml(outstream, readFileIdSet);\n\n        outstream << \"\\n</script>\\n\";\n\n\n    });\n\n    inject.stream(html_content, outstream);\n}\n\n\nvoid CommandPrinterHtml::processSingleCommand(QTextStream& outstream,\n                                              CommandInfo& cmd,\n                                              QDateTime& finalCommandEndDate,\n                                              QTemporaryFile &tmpCmdDataFile){\n\n    m_cmdStats.collectCmd(cmd);\n\n    if(cmd.startTime.msecsTo(cmd.endTime) < 1){\n        // for the plot we need at least one millisecond time difference to draw a rect:\n        cmd.endTime = cmd.endTime.addMSecs(1);\n    }\n    finalCommandEndDate = std::max(finalCommandEndDate, cmd.endTime);\n\n    // to speed up loading of the html document (especially useful\n    // for more than 2000 entries), we 'split' up the command\n    // data, to first (quickly) render the session time-line and\n    // load the rest afterwards. 
For the timeline and command-list, only id, start/end-date, uuid and\n    // text are necessary.\n    writeCmdStartup(cmd, outstream);\n\n    // write the 'rest' to tempfile, line by line\n    writeCmdData(cmd, tmpCmdDataFile);\n\n}\n\nvoid CommandPrinterHtml::writeCmdStartup(const CommandInfo &cmd, QTextStream &outstream)\n{\n    QJsonObject jsonCmdStartup;\n\n    CmdJsonWriteCfg cmdJsonStartup(false);\n    cmdJsonStartup.maxCountRFiles = m_maxCountRfiles;\n    cmdJsonStartup.maxCountWFiles = m_maxCountWfiles;\n    cmdJsonStartup.idInDb = true;\n    cmdJsonStartup.startEndTime = true;\n    cmdJsonStartup.sessionInfo = true;\n    cmdJsonStartup.text = true;\n    cmd.write(jsonCmdStartup, m_writeDatesWithMillisec, cmdJsonStartup);\n\n    outstream << QJsonDocument(jsonCmdStartup).toJson(QJsonDocument::Compact);\n}\n\nvoid CommandPrinterHtml::writeCmdData(const CommandInfo &cmd,\n                                      QTemporaryFile &tmpCmdDataFile)\n{\n    CmdJsonWriteCfg cmdJsonData(true);\n    cmdJsonData.maxCountRFiles = m_maxCountRfiles;\n    cmdJsonData.maxCountWFiles = m_maxCountWfiles;\n\n    cmdJsonData.idInDb = false;\n    cmdJsonData.startEndTime = false;\n    cmdJsonData.sessionInfo = false;\n    cmdJsonData.text = false;\n\n    QJsonObject jsonCmdData;\n    cmd.write(jsonCmdData, m_writeDatesWithMillisec, cmdJsonData);\n\n    // since we may restrict the number of read/written files (to not generate\n    // huge html-files), store the real number in any case:\n    jsonCmdData[\"fileReadEvents_length\"] = cmd.fileReadInfos.length();\n    jsonCmdData[\"fileWriteEvents_length\"] = cmd.fileWriteInfos.length();\n\n    if(tmpCmdDataFile.write(QJsonDocument(jsonCmdData).toJson(QJsonDocument::Compact)) == -1){\n        throw QExcIo(\"Failed to write cmdData to tmpfile: \" +  tmpCmdDataFile.errorString());\n    }\n    tmpCmdDataFile.write(\"\\n\");\n}\n\n\nvoid CommandPrinterHtml::addScriptsToReadFilesSet(const FileReadInfos &infos,\n                               
                   FileReadInfoSet_t &set)\n{\n    int counter = 0;\n    for(const auto& info : infos){\n        if(info.isStoredToDisk){\n            // don't check mimetype here, to avoid performing it multiple times\n            // for the same script-id\n            set.insert(info.idInDb);\n        }\n        ++counter;\n        if(counter > m_maxCountRfiles){\n            break;\n        }\n    }\n}\n\nvoid CommandPrinterHtml::writeReadFileContentsToHtml(QTextStream &outstream,\n                                                     FileReadInfoSet_t &readFileIdSet)\n{\n    // uniquely store each script in the html file\n    outstream << \"const readFileContentMap = new Map([\\n\";\n    auto autoCloseNewMap = finally([&outstream] {  outstream << \"]);\\n\"; });\n\n    for(const auto& id_ : readFileIdSet) {\n        // javascript Maps can take 2d arrays in the constructor.\n        // Each array entry has the format [key, value].\n        outstream << \"[\" << id_ << \",\";\n        auto autoCloseBracket = finally([&outstream] { outstream << \"],\\n\"; });\n        QFileThrow f(m_storedFiles.mkPathStringToStoredReadFile(id_));\n        try {\n            f.open(QFile::OpenModeFlag::ReadOnly);\n            auto mtype = m_mimedb.mimeTypeForData(&f);\n            if(! 
mtype.inherits(\"text/plain\")){\n                outstream << \"null\"; // don't use 'undefined' here!\n                continue;\n            }\n            outstream << \"\\\"\";\n            auto autoSetQuote = finally([&outstream] { outstream << \"\\\"\"; });\n            writeFileToStream(f, outstream);\n\n        } catch (const QExcIo& e) {\n            logWarning << qtr(\"Error writing read file with id %1 to html: %2\")\n                          .arg(id_).arg(e.descrip());\n        }\n    }\n}\n\nvoid CommandPrinterHtml::writeFileToStream(QFileThrow &f, QTextStream &outstream)\n{\n    const int BUFSIZE = 9000; // MUST be divisible by 3, so we create no padding '='\n                              // between base64-chunks (;\n    char buf[BUFSIZE];\n    qint64 readCount;\n    while(  (readCount = f.read(buf, BUFSIZE)) > 0 ){\n        // we could be writing anything here to the js file - KISS, and use base64\n        outstream << QByteArray::fromRawData(buf, int(readCount)).toBase64();\n    }\n}\n\nvoid CommandPrinterHtml::writeStatistics(QTextStream &outstream)\n{\n    {\n        QJsonArray jsonMostFileMods;\n        if(m_cmdStats.cmdsWithMostFileMods().size() >= m_minCountOfStats) {\n            for(const auto& e : m_cmdStats.cmdsWithMostFileMods()){\n                QJsonObject o;\n                o[\"idx\"] = e.idx;\n                o[\"countOfFileMods\"] = e.countOfFileMods;\n                jsonMostFileMods.append(o);\n            }\n        }\n        outstream << \"const mostFileMods = \"\n                  << QJsonDocument(jsonMostFileMods).toJson(QJsonDocument::Compact) << \"\\n\";\n    }\n\n    {\n        QJsonArray jsonSessionsMostCmds;\n        if(m_cmdStats.sessionMostCmds().size() >= m_minCountOfStats) {\n            for(const auto & e : m_cmdStats.sessionMostCmds()){\n                QJsonObject o;\n                o[\"idxFirstCmd\"] = e.idx;\n                o[\"countOfCommands\"] = e.cmdCount;\n                
jsonSessionsMostCmds.append(o);\n            }\n        }\n        outstream << \"const sessionsMostCmds = \"\n                  << QJsonDocument(jsonSessionsMostCmds).toJson(QJsonDocument::Compact) << \"\\n\";\n    }\n\n    {\n        QJsonArray json;\n        if(m_cmdStats.cwdCmdCounts().size() >= m_minCountOfStats) {\n            for(const auto & e : m_cmdStats.cwdCmdCounts()){\n                QJsonObject o;\n                o[\"workingDir\"] = e.workingDir;\n                o[\"countOfCommands\"] = e.cmdCount;\n                json.append(o);\n            }\n        }\n        outstream << \"const cwdCmdCounts = \"\n                  << QJsonDocument(json).toJson(QJsonDocument::Compact) << \"\\n\";\n    }\n\n    {\n        QJsonArray json;\n        if(m_cmdStats.dirIoCounts().size() >= m_minCountOfStats) {\n            for(const auto & e : m_cmdStats.dirIoCounts()){\n                QJsonObject o;\n                o[\"dir\"] = e.dir;\n                o[\"readCount\"] = e.readCount;\n                o[\"writeCount\"] = e.writeCount;\n                json.append(o);\n            }\n        }\n        outstream << \"const dirIoCounts = \"\n                  << QJsonDocument(json).toJson(QJsonDocument::Compact) << \"\\n\";\n    }\n\n\n\n}\n\n\n"
  },
  {
    "path": "src/shournal/command_printer_html.h",
    "content": "#pragma once\n\n#include <unordered_set>\n#include <QMimeDatabase>\n\n#include \"command_printer.h\"\n#include \"fileinfos.h\"\n#include \"commandinfo.h\"\n#include \"qfilethrow.h\"\n#include \"cmd_stats.h\"\n\nclass QTextStream;\nclass QTemporaryFile;\n\nclass CommandPrinterHtml : public CommandPrinter\n{\npublic:\n    CommandPrinterHtml() = default;\n\n    void printCommandInfosEvtlRestore(std::unique_ptr<CommandQueryIterator>& cmdIter) override;\n\nprotected:\n     Q_DISABLE_COPY(CommandPrinterHtml)\n\n    typedef std::unordered_set<qint64> FileReadInfoSet_t;\n    void processSingleCommand(QTextStream& outstream, CommandInfo& cmd, QDateTime&\n                              finalCommandEndDate, QTemporaryFile& tmpCmdDataFile);\n    void writeCmdStartup(const CommandInfo& cmd, QTextStream& outstream);\n    void writeCmdData(const CommandInfo& cmd,\n                      QTemporaryFile& tmpCmdDataFile);\n\n    void addScriptsToReadFilesSet(const FileReadInfos& infos, FileReadInfoSet_t& set);\n    void writeReadFileContentsToHtml(QTextStream& outstream, FileReadInfoSet_t& readFileIdSet);\n    void writeFileToStream(QFileThrow& f, QTextStream& outstream);\n    void writeStatistics(QTextStream& outstream);\n\n    QMimeDatabase m_mimedb;\n    bool m_writeDatesWithMillisec{true};\n\n};\n\n\n"
  },
  {
    "path": "src/shournal/command_printer_human.cpp",
    "content": "#include \"command_printer_human.h\"\n\n#include <sys/ioctl.h>\n#include <cstdio>\n#include <unistd.h>\n\n#include <QDebug>\n#include <QDir>\n#include <QStandardPaths>\n#include <QJsonDocument>\n#include <QHostInfo>\n\n#include \"command_printer.h\"\n#include \"qformattedstream.h\"\n#include \"util.h\"\n#include \"qfilethrow.h\"\n#include \"db_controller.h\"\n#include \"logger.h\"\n#include \"file_query_helper.h\"\n#include \"excos.h\"\n#include \"os.h\"\n#include \"translation.h\"\n#include \"qoutstream.h\"\n#include \"commandinfo.h\"\n\n\nvoid CommandPrinterHuman::printCommandInfosEvtlRestore(std::unique_ptr<CommandQueryIterator> &cmdIter)\n{\n    if( cmdIter->computeSize() == 0){\n        QOut() << qtr(\"No results found matching the query.\\n\");\n        return;\n    }\n    if(! m_outputFile.isOpen()){\n        m_outputFile.open(QFile::OpenModeFlag::WriteOnly);\n    }\n\n    QFormattedStream s(&m_outputFile);\n    struct winsize termWinSize{};\n    if(ioctl(STDOUT_FILENO, TIOCGWINSZ, &termWinSize) == 0){\n        s.setMaxLineWidth((termWinSize.ws_col > 5) ? termWinSize.ws_col : 80 );\n    } else {\n        // this happens e.g. 
when the output is piped to grep\n        logDebug << \"failed to determine terminal size, using max...\";\n        s.setMaxLineWidth( std::numeric_limits<int>::max() );\n    }\n\n    const QString currentHostname = QHostInfo::localHostName();\n    while(cmdIter->next()){\n        m_cmdStats.collectCmd(cmdIter->value());\n        s.setLineStart(m_indentlvl0);\n        // for indentlvl0 line-word-wrapping makes almost no\n        // sense and hinders copy-pasting  of long terminal commands.\n        auto oldMaxLineWidth = s.maxLineWidth();\n        s.setMaxLineWidth(std::numeric_limits<int>::max());\n\n        auto & cmd = cmdIter->value();\n        s << qtr(\"cmd-id %1\").arg(cmd.idInDb);\n        if(cmd.returnVal != CommandInfo::INVALID_RETURN_VAL){\n            s << qtr(\"$?=%1\").arg(QString::number(cmd.returnVal));\n        }\n\n        // Only print end-date/time, if different from start-date/time, ignoring seconds.\n        dropFromTime(cmd.startTime, 's');\n        dropFromTime(cmd.endTime, 's');\n        const QString tformat = \"hh:mm\";\n        const QString dtformat = QString(\"yyyy-MM-dd\") + \" \" + tformat;\n        QString cmdEndTime = (cmd.startTime.date() != cmd.endTime.date()) ?\n                              \" - \" + cmd.endTime.toString(dtformat) :\n                                (cmd.startTime.time() != cmd.endTime.time()) ?\n                                \"-\" + cmd.endTime.time().toString(tformat) : \"\";\n        s << cmd.startTime.toString(dtformat) + cmdEndTime << \"$\"\n          << cmd.text << \"\\n\";\n        s << qtr(\"Working directory: %1\\n\").arg(cmd.workingDirectory);\n        if(! 
cmd.sessionInfo.uuid.isNull()){\n            s << qtr(\"session-uuid\") << cmd.sessionInfo.uuid.toBase64() << \"\\n\";\n        }\n        if(cmd.hostname != currentHostname){\n            s << qtr(\"Hostname: %1\\n\").arg(cmd.hostname);\n        }\n        s.setMaxLineWidth(oldMaxLineWidth);\n\n        printWriteInfos(cmd, s);\n        printReadInfos(s, cmd);\n    }\n\n    m_cmdStats.eval();\n\n    if(m_cmdStats.cmdsWithMostFileMods().size() >= m_minCountOfStats){\n        s.setLineStart(m_indentlvl0);\n        s << qtr(\"\\nCommands with most file modifications:\\n\");\n        s.setLineStart(m_indentlvl1);\n        for(const auto& e : m_cmdStats.cmdsWithMostFileMods()){\n            s << qtr(\"cmd-id %1 modified %2 file(s) - %3\\n\")\n                 .arg(e.idInDb).arg(e.countOfFileMods).arg(e.cmdTxt);\n        }\n\n    }\n\n    if(m_cmdStats.sessionMostCmds().size() >= m_minCountOfStats){\n        s.setLineStart(m_indentlvl0);\n        s << qtr(\"\\nSessions with most commands:\\n\");\n        s.setLineStart(m_indentlvl1);\n        for(const auto& e : m_cmdStats.sessionMostCmds()){\n            s << qtr(\"session-uuid %1 - %2 command(s)\\n\")\n                 .arg(e.cmdUuid.toBase64().data()).arg(e.cmdCount);\n        }\n    }\n\n    if(m_cmdStats.cwdCmdCounts().size() >= m_minCountOfStats){\n        s.setLineStart(m_indentlvl0);\n        s << qtr(\"\\nWorking directories with most commands:\\n\");\n        s.setLineStart(m_indentlvl1);\n        for(const auto& e : m_cmdStats.cwdCmdCounts()){\n            s << qtr(\"%1 command(s) at %2\\n\")\n                 .arg(e.cmdCount).arg(e.workingDir);\n        }\n    }\n\n    if(m_cmdStats.dirIoCounts().size() >= m_minCountOfStats){\n        s.setLineStart(m_indentlvl0);\n        s << qtr(\"\\nDirectories with most input/output-activity:\\n\");\n        s.setLineStart(m_indentlvl1);\n        for(const auto& e : m_cmdStats.dirIoCounts()){\n            s << qtr(\"Total %1 (%2 written, %3 read) files at %4\\n\")\n      
           .arg(e.writeCount + e.readCount).arg(e.writeCount)\n                 .arg(e.readCount).arg(e.dir);\n        }\n    }\n\n    if(m_countOfRestoredFiles > 0){\n        s.setLineStart(m_indentlvl0);\n        s << qtr(\"%1 file(s) restored at %2\").arg(m_countOfRestoredFiles)\n             .arg(m_restoreDir.absolutePath()) << \"\\n\";\n    }\n}\n\n\nvoid\nCommandPrinterHuman::printReadFileEventEvtlRestore\n(const CommandInfo &cmd, QFormattedStream& s,\n const FileReadInfo& f, const QString &cmdIdStr){\n    auto fStatus = (reportFileStatus()) ? \" \"+f.currentStatus(cmd) : \"\";\n    s.setLineStart(m_indentlvl2);\n    s << pathJoinFilename(f.path, f.name)\n      << \"(\" + m_userStrConv.bytesToHuman(f.size) + \")\"\n      << qtr(\"Hash:\") << ((f.hash.isNull()) ? \"-\" : QString::number(f.hash.value()))\n      << \"id\" << QString::number(f.idInDb) + fStatus + \"\\n\";\n    if(! f.isStoredToDisk){\n        // since shournal 2.1 it is possible to log only meta-information about\n        // read files without storing them in the read files dir.\n        return;\n    }\n    if(m_restoreReadFiles){\n        createRestoreTopleveDirIfNeeded();\n    }\n\n    bool printFileContentSuccess {false};\n    QFileThrow file(m_storedFiles.mkPathStringToStoredReadFile(f));\n    try {\n        file.open(QFile::OpenModeFlag::ReadOnly);\n        auto mtype = m_mimedb.mimeTypeForData(&file);\n        s.setLineStart(m_indentlvl3);\n        if(! 
mtype.inherits(\"text/plain\")){\n            s << qtr(\"Not printing content (mimetype %1)\").arg(mtype.name()) << \"\\n\";\n            return;\n        }\n        printReadFile(s, file);\n        printFileContentSuccess = true;\n\n        if(m_restoreReadFiles){\n            restoreReadFile_safe(f, cmdIdStr, file);\n        }\n    } catch (const QExcIo& e) {\n        if(printFileContentSuccess){\n            logWarning << e.what();\n        } else {\n            logWarning << qtr(\"Error while printing read file '%1' with id %2: %3\")\n                          .arg(f.name).arg(f.idInDb).arg(e.descrip());\n        }\n        return;\n    }\n}\n\n\nvoid CommandPrinterHuman::printReadFile(QFormattedStream &s, QFile &f)\n{\n    QTextStream fstream(&f);\n    int nLinesPrinted = 0;\n    while(! fstream.atEnd()){\n        QString line = fstream.readLine();\n        if(line.isEmpty()){\n            continue;\n        }\n        s << line << \"\\n\";\n        if(++nLinesPrinted >= m_maxCountOfReadFileLines){\n            s << \"...\\n\";\n            break;\n        }\n    }\n}\n\nvoid CommandPrinterHuman::printWriteInfos\n(const CommandInfo& cmd, QFormattedStream &s)\n{\n    auto & fileWriteInfos = cmd.fileWriteInfos;\n    if(fileWriteInfos.isEmpty()){\n        return;\n    }\n    s.setLineStart(m_indentlvl1);\n    const char dotOrColon = (m_maxCountWfiles == 0) ? '.' : ':';\n    const QString fileStr = (fileWriteInfos.size() == 1) ? \"file\" : \"files\";\n    s << qtr(\"%1 written %2%3\\n\").arg(fileWriteInfos.size()).arg(fileStr).arg(dotOrColon);\n    s.setLineStart(m_indentlvl2);\n    int counter = 0;\n    for(const auto& f : fileWriteInfos){\n        if(counter >= m_maxCountWfiles){\n            if(counter > 0){\n                s << qtr(\"... and %1 more files.\\n\")\n                     .arg(fileWriteInfos.size() - m_maxCountWfiles);\n            }\n            break;\n        }\n        auto fStatus = (reportFileStatus()) ? 
\" \"+f.currentStatus(cmd) : \"\";\n        s << pathJoinFilename(f.path, f.name)\n          << \"(\" + m_userStrConv.bytesToHuman(f.size) + \")\"\n          << qtr(\"Hash:\") << ((f.hash.isNull()) ? \"-\" : QString::number(f.hash.value())) +\n             fStatus + \"\\n\";\n        ++counter;\n    }\n\n}\n\nvoid CommandPrinterHuman::printReadInfos(QFormattedStream &s, const CommandInfo &cmd)\n{\n    if(cmd.fileReadInfos.isEmpty()){\n        return;\n    }\n    s.setLineStart(m_indentlvl1);\n\n    const char dotOrColon = (m_maxCountRfiles == 0) ? '.' : ':';\n    const QString fileStr = (cmd.fileReadInfos.size() == 1) ? \"file\" : \"files\";\n    s << qtr(\"%1 read %2%3\\n\").arg(cmd.fileReadInfos.size()).arg(fileStr).arg(dotOrColon);\n    const QString cmdIdStr = QString::number(cmd.idInDb);\n    int counter = 0;\n    for(const auto & f : cmd.fileReadInfos){\n        if(counter >= m_maxCountRfiles){\n            if(counter > 0){\n                s << qtr(\"... and %1 more files.\\n\")\n                     .arg(cmd.fileReadInfos.size() - m_maxCountRfiles);\n            }\n            break;\n        }\n        printReadFileEventEvtlRestore(cmd, s, f, cmdIdStr);\n        ++counter;\n    }\n}\n\n\n\n/// Do not print more than that number of lines for each read file\nvoid CommandPrinterHuman::setMaxCountOfReadFileLines(int maxCountOfReadFileLines)\n{\n    m_maxCountOfReadFileLines = maxCountOfReadFileLines;\n}\n"
  },
  {
    "path": "src/shournal/command_printer_human.h",
    "content": "#pragma once\n\n#include \"command_printer.h\"\n#include \"commandinfo.h\"\n\n#include <QMimeDatabase>\n\n\nclass CommandPrinterHuman : public CommandPrinter\n{\npublic:\n    CommandPrinterHuman() = default;\n\n    void printCommandInfosEvtlRestore(std::unique_ptr<CommandQueryIterator>& cmdIter) override;\n\n    virtual void setMaxCountOfReadFileLines(int maxCountOfReadFileLines);\n\nprotected:\n    Q_DISABLE_COPY(CommandPrinterHuman)\n\n    void printReadFileEventEvtlRestore(const CommandInfo &cmd, QFormattedStream& s,\n                                       const FileReadInfo& readInfo,\n                                       const QString& cmdIdStr);\n    void printReadFile(QFormattedStream& s, QFile& f);\n\n    void printWriteInfos(const CommandInfo &cmd, QFormattedStream& s);\n    void printReadInfos(QFormattedStream& s, const CommandInfo& cmd);\n\n    QMimeDatabase m_mimedb;\n    const QString m_indentlvl0 {\"\"};\n    const QString m_indentlvl1 {\"  \"};\n    const QString m_indentlvl2 {\"     \"};\n    const QString m_indentlvl3 {\"          \"};\n    int m_maxCountOfReadFileLines {5};\n    Conversions m_userStrConv;\n};\n"
  },
  {
    "path": "src/shournal/command_printer_json.cpp",
    "content": "﻿\n#include <QJsonObject>\n#include <QJsonDocument>\n\n#include \"command_printer_json.h\"\n#include \"command_query_iterator.h\"\n#include \"logger.h\"\n#include \"util.h\"\n\n\nvoid CommandPrinterJson::printCommandInfosEvtlRestore(std::unique_ptr<CommandQueryIterator> &cmdIter)\n{\n    if(! m_outputFile.isOpen()){\n        m_outputFile.open(QFile::OpenModeFlag::WriteOnly);\n    }\n\n    QTextStream outstream(&m_outputFile);\n    {\n        QJsonObject header;\n        header[\"pathToReadFiles\"] = StoredFiles::getReadFilesDir();\n        QJsonDocument doc(header);\n        outstream << \"HEADER:\" << doc.toJson(QJsonDocument::Compact) << \"\\n\";\n    }\n\n    CmdJsonWriteCfg jsonCfg(true);\n    jsonCfg.fileStatus = this->reportFileStatus();\n    while(cmdIter->next()){\n        QJsonObject cmdObject;\n        cmdIter->value().write(cmdObject, false, jsonCfg);\n        QJsonDocument doc(cmdObject);\n        outstream << \"COMMAND:\" << doc.toJson(QJsonDocument::Compact) << \"\\n\";\n\n        if(! m_restoreReadFiles){\n            continue;\n        }\n        for(const auto& readInfo : cmdIter->value().fileReadInfos){\n            if(readInfo.isStoredToDisk){\n                createRestoreTopleveDirIfNeeded();\n                restoreReadFile_safe(readInfo, QString::number(cmdIter->value().idInDb));\n            }\n        }\n    }\n\n    {\n        QJsonObject footer;\n        footer[\"restorePath\"] =QJsonValue::fromVariant(\n                    (m_countOfRestoredFiles == 0) ? QVariant() : m_restoreDir.absolutePath() );\n        footer[\"countOfRestoredFiles\"] = m_countOfRestoredFiles;\n        QJsonDocument doc(footer);\n        outstream << \"FOOTER:\" << doc.toJson(QJsonDocument::Compact) << \"\\n\";\n    }\n}\n"
  },
  {
    "path": "src/shournal/command_printer_json.h",
    "content": "#pragma once\n\n#include \"command_printer.h\"\n\nclass CommandPrinterJson : public CommandPrinter\n{\npublic:\n    CommandPrinterJson() = default;\n\n    void printCommandInfosEvtlRestore(std::unique_ptr<CommandQueryIterator>& cmdIter) override;\n\nprivate:\n    Q_DISABLE_COPY(CommandPrinterJson)\n};\n\n"
  },
  {
    "path": "src/shournal/shournal.cpp",
    "content": "#ifndef _GNU_SOURCE\n    #define _GNU_SOURCE\n#endif\n\n#include <csignal>\n\n#include <QtCore>\n#include <QCoreApplication>\n#include <QMimeDatabase>\n#include <cassert>\n\n#include \"qoptargparse.h\"\n#include \"excoptargparse.h\"\n#include \"excos.h\"\n#include \"logger.h\"\n\n#include \"os.h\"\n#include \"exccfg.h\"\n#include \"cpp_exit.h\"\n#include \"settings.h\"\n#include \"db_connection.h\"\n#include \"util.h\"\n#include \"cleanupresource.h\"\n#include \"qoutstream.h\"\n#include \"util.h\"\n#include \"translation.h\"\n#include \"app.h\"\n#include \"qsqlquerythrow.h\"\n#include \"argcontrol_dbquery.h\"\n#include \"argcontrol_dbdelete.h\"\n#include \"qexcdatabase.h\"\n#include \"conversions.h\"\n#include \"console_dialog.h\"\n#include \"qfilethrow.h\"\n#include \"shournal_run_common.h\"\n\nusing namespace shournal_run_common;\n\n/// Uncaught exception handler\nvoid onterminate() {\n    try {\n        auto unknown = std::current_exception();\n        if (unknown) {\n            std::rethrow_exception(unknown);\n        }\n    } catch (const std::exception& e) {\n        logCritical << e.what() << \"\\n\";\n    } catch (...) {\n        logCritical << \"unknown exception occurred\\n\";\n    }\n}\n\n[[noreturn]]\nvoid execShournalRun(const QByteArray& backendFilename,\n                     QOptArg::RawValues_t &cargs, bool withinOrigMountspace,\n                     QVarLengthArray<QOptArg*> forwardArgs);\n\n\nint shournal_main(int argc, char *argv[])\n{\n    app::setupNameAndVersion(app::SHOURNAL);\n\n    if(! translation::init()){\n        QIErr() << \"Failed to initialize translation\";\n    }\n\n    logger::setup(app::CURRENT_NAME);\n\n    std::set_terminate(onterminate);\n    if(! 
shournal_common_init()){\n        logCritical << qtr(\"Fatal error: failed to initialize custom Qt conversion functions\");\n        cpp_exit(1);\n    }\n\n    // ignore first arg (command to this app)\n    --argc;\n    ++argv;\n\n    auto & sets = Settings::instance();\n\n    QOptArgParse parser;\n    parser.setHelpIntroduction(qtr(\"Launch a command and recursively observe in specific \"\n                                   \"directories which files \"\n                                   \"were modified by that process and its children. \") + \"\\n\");\n    QOptArg argVersion(\"v\", \"version\", qtr(\"Display version\"), false);\n    parser.addArg(&argVersion);\n\n    QOptArg argExec(\"e\", \"exec\",\n                    qtr(\"Execute and observe the passed program \"\n                        \"and its arguments (this argument has to be last). \"\n                        \"All further parameters starting with a minus \"\n                        \"are considered options for the shournal-run* backend until \"\n                        \"double dash -- or the first command \"\n                        \"(not starting with a minus) occurs, e.g.\\n\"\n                        \"shournal -e --print-summary echo foobar\\n\"\n                        \"-> --print-summary is an argument for shournal-run.\\n\"\n                        \"shournal --exec-filename /bin/bash -e -- -bash\\n\"\n                        \"on the other hand can be used for commands starting \"\n                        \"with a dash (e.g. login-shells).\"), false);\n    argExec.setFinalizeFlag(true);\n    parser.addArg(&argExec);\n\n    QOptArg argExecFilename(\"\", \"exec-filename\", qtr(\"This is an advanced option. \"\n                                                     \"In most cases the first argument of a \"\n                                                     \"program is the program name. 
For \"\n                                                     \"example for login-shells this does \"\n                                                     \"not have to be the case. If this \"\n                                                     \"argument is provided, that filename \"\n                                                     \"is used instead of argv[0]\"));\n    parser.addArg(&argExecFilename);\n    argExecFilename.addRequiredArg(&argExec);\n\n\n    QOptArg argBackend(\"\", \"backend-filename\",\n                       qtr(\"When executing a command (option %1) use \"\n                           \"the given filename as observation backend-command\")\n                           .arg(argExec.name()));\n    parser.addArg(&argBackend);\n    argBackend.addRequiredArg(&argExec);\n\n\n\n    QOptArg argMsenterOrig(\"\", \"msenter-orig-mountspace\",\n                           qtr(\"Must be passed along with '%1'. Execute the \"\n                               \"given command in the 'original' mount-namespace \"\n                               \"created the first time %2 observed a process.\")\n                            .arg(argExec.name(), app::SHOURNAL_RUN), false);\n    argMsenterOrig.setInternalOnly(true);\n    argMsenterOrig.addRequiredArg(&argExec);\n    parser.addArg(&argMsenterOrig);\n\n    QOptArg argEditCfg(\"c\", \"edit-cfg\", qtr(\"Edit the config-file at %1 \"\n                                            \"with your favourite text-editor:\\n\"\n                                            \"export EDITOR='...'\").arg(sets.cfgFilepath()),\n                                             false);\n    parser.addArg(&argEditCfg);\n\n    QOptArg argQuery(\"q\", \"query\", qtr(\"Query %1's database for activities. 
Type \"\n                                       \"--query --help for details.\").arg(app::SHOURNAL), false);\n    argQuery.setFinalizeFlag(true);\n    parser.addArg(&argQuery);\n\n    QOptArg argDelete(\"\", \"delete\", qtr(\"delete (parts of) %1's command history from the\"\n                                         \" database. Type \"\n                                       \"--delete --help for details.\").arg(app::SHOURNAL), false);\n    argDelete.setFinalizeFlag(true);\n    parser.addArg(&argDelete);\n\n    QOptArg argPrintMime(\"\", \"print-mime\", qtr(\"Print the mimetype of an existing file(name) \"\n                                               \"which can be used in shournal's config-\"\n                                               \"file for setting file-event-rules.\"));\n    parser.addArg(&argPrintMime);\n\n\n    QOptArg argVerbosity(\"\", \"verbosity\", qtr(\"How much shall be printed to stderr. Note that \"\n                                              \"for 'dbg' shournal must not be a 'Release'-build, \"\n                                              \"debug-messages are lost in Release-mode.\"));\n    argVerbosity.setAllowedOptions(app::VERBOSITIES);\n    parser.addArg(&argVerbosity);\n\n    QOptArg argValidateSettings(\"\", \"validate-settings\",\n                                qtr(\"If the settings-file is well formed, \"\n                                    \"return 0, else print an error and return \"\n                                    \"a nonzero value\"), false);\n    parser.addArg(&argValidateSettings);\n\n    QOptArg argLsOurPaths(\"\", \"ls-paths\",\n                                qtr(\"Print shournal's application paths (database-dir, etc.)\"), false);\n\n    parser.addArg(&argLsOurPaths);\n\n    auto argCfgDir = mkarg_cfgdir();\n    parser.addArg(&argCfgDir);\n    auto argDataDir = mkarg_datadir();\n    parser.addArg(&argDataDir);\n\n    // Forward these to shournal-run:\n    QVarLengthArray<QOptArg*> forwardArgs = 
{&argVerbosity, &argExecFilename, &argCfgDir,\n                                           &argDataDir};\n\n    try {\n        parser.parse(argc, argv);\n        auto & sets = Settings::instance();\n        if(argCfgDir.wasParsed()){\n            sets.setUserCfgDir(argCfgDir.getValue<QString>());\n        }\n        if(argDataDir.wasParsed()){\n            sets.setUserDataDir(argDataDir.getValue<QString>());\n        }\n\n        if(argVerbosity.wasParsed()){\n            QByteArray verbosity = argVerbosity.getOptions(1).first().toLocal8Bit();\n            logger::setVerbosityLevel(verbosity.constData());\n        } else {\n            logger::setVerbosityLevel(QtMsgType::QtWarningMsg);\n        }\n\n        if(argExec.wasParsed()){\n            auto backendFilename = argBackend.getValue<QString>();\n            if(backendFilename.isEmpty()){\n                backendFilename = sets.chooseShournalRunBackend();\n                if(backendFilename.isEmpty()){\n                    QIErr() << qtr(\"No backend-filename given and no valid \"\n                                   \"backend found - exiting...\");\n                    cpp_exit(1);\n                }\n            }\n            execShournalRun(backendFilename.toLocal8Bit(),\n                            parser.rest(), argMsenterOrig.wasParsed(),\n                            forwardArgs);\n        }\n\n        if(argVersion.wasParsed()){\n            QOut() << app::SHOURNAL << qtr(\" version \") << app::version().toString() << \"\\n\";\n            cpp_exit(0);\n        }\n\n        logger::enableLogToFile(app::SHOURNAL);\n\n        sets.load();\n        if(argValidateSettings.wasParsed()){\n            cpp_exit(0);\n        }\n\n        if(argQuery.wasParsed()){\n            argcontol_dbquery::parse(parser.rest().len, parser.rest().argv);\n            // never get here\n        }\n\n        if(argDelete.wasParsed()){\n            argcontrol_dbdelete::parse(parser.rest().len, parser.rest().argv);\n            // 
never get here\n        }\n\n        if(argEditCfg.wasParsed()){\n            int ret = console_dialog::openFileInExternalEditor(sets.cfgFilepath());\n            cpp_exit(ret);\n        }\n\n        if(argPrintMime.wasParsed()){\n            QFileThrow f(argPrintMime.getValue<QString>());\n            f.open(QFile::OpenModeFlag::ReadOnly);\n            auto mtype = QMimeDatabase().mimeTypeForData(&f);\n            QOut() << mtype.name() << \"\\n\";\n            cpp_exit(0);\n        }\n\n        if(argLsOurPaths.wasParsed()){\n            QOut() << qtr(\"Database directory: \") << db_connection::getDatabaseDir() << \"\\n\"\n                   << qtr(\"Configuration directory: \")\n                           << splitAbsPath(sets.cfgFilepath()).first << \"\\n\"\n                   << qtr(\"Cache directory (log-files): \") << logger::logDir() << \"\\n\" ;\n            cpp_exit(0);\n        }\n\n\n        if(parser.rest().len != 0){\n            QIErr() << qtr(\"Invalid parameters passed: %1.\\n\"\n                           \"Show help with --help\").\n                       arg( argvToQStr(parser.rest().len, parser.rest().argv));\n             cpp_exit(1);\n        }\n        QIErr() << \"No action specified\";\n\n    } catch (const ExcOptArgParse & ex) {\n        QIErr() << qtr(\"Commandline seems to be erroneous:\")\n                << ex.descrip();\n    } catch (const ExcConversion & ex) {\n        QIErr() << ex.descrip();\n    }\n    catch(const qsimplecfg::ExcCfg & ex){\n        QIErr() << qtr(\"Failed to load config file: \") << ex.descrip();\n    } catch (const QExcIo& ex){\n        QIErr() << qtr(\"IO-operation failed: \") << ex.descrip();\n    } catch (const os::ExcOs& ex){\n        QIErr() << ex.what();\n    }\n    cpp_exit(1);\n}\n\n\n\nvoid execShournalRun(const QByteArray& backendFilename,\n                     QOptArg::RawValues_t& cargs , bool withinOrigMountspace,\n                     QVarLengthArray<QOptArg*> forwardArgs){\n    // setuid-programs 
change some parts of the environment for security reasons.\n    // Therefor, pass the environment via argv and apply it later (after setuid\n    // to original user)\n\n    QVarLengthArray<const char*, 8192> args;\n    args.push_back(backendFilename);\n    if(withinOrigMountspace){\n        args.push_back(\"--msenter-orig-mountspace\");\n    }\n\n    QByteArray verbosityStr;\n    QVarLengthArray<QByteArray> forwardArgsBuf;\n    for(QOptArg* a : forwardArgs){\n        if(a->wasParsed()){\n            // we need another buffer for the char* args array.\n            forwardArgsBuf.push_back(a->name().toLocal8Bit());\n            args.push_back(forwardArgsBuf.last());\n            args.push_back(a->vals().argv[0]);\n        }\n    }\n\n    const char* tmpdir = getenv(\"TMPDIR\");\n    if(tmpdir != nullptr){\n        args.push_back(\"--tmpdir\");\n        args.push_back(tmpdir);\n    }\n\n    args.push_back(\"--env\");\n    // first value after --env is its size, which we don't know yet.\n    args.push_back(\"DUMMY\");\n    int envSizeIdx = args.size() -1;\n    for (char **env = environ; *env != nullptr; env++) {\n        args.push_back(*env);\n    }\n    // optimization in shournal-run...\n    args.push_back(\"SHOURNAL_DUMMY_NULL=1\");\n    std::string envSize = std::to_string(args.size() - envSizeIdx - 1);\n    args[envSizeIdx] = envSize.c_str();\n\n\n    // As long as arguments start with a minus those\n    // are passed as options to the shournal-run backend, e.g.\n    // shournal-run -e --no-db --print-summary echo ok\n    int execIdx = -1;\n    for(int i=0; i < cargs.len; i++){\n        if(execIdx != -1){\n            args.push_back(cargs.argv[i]);\n            continue;\n        }\n        if(strcmp(cargs.argv[i], \"--\") == 0){\n            // option terminator -> all further options\n            // will be directly passed to our backend.\n            execIdx = args.size();\n            args.push_back(\"--exec\");\n        } else if(cargs.argv[i][0] == '-'){\n   
         // option for shournal-run\n            args.push_back(cargs.argv[i]);\n        } else {\n            // first non-option.\n            execIdx = args.size();\n            args.push_back(\"--exec\");\n            args.push_back(cargs.argv[i]);\n        }\n    }\n    if(execIdx == -1 || execIdx == args.size()-1){\n        QIErr() << qtr(\"No executable found after parsing the commandline. \"\n                       \"Note that for commands starting with dashes (e.g. login-shells) \"\n                       \"--exec has to be terminated by double-dash --\\n\"\n                       \"Current arguments:\");\n        for(const auto& arg : args){\n            QErr() << arg << \" \";\n        }\n        QErr() << \"\\n\";\n        cpp_exit(1);\n    }\n\n    args.push_back(nullptr);\n    os::exec(args);\n}\n\nint main(int argc, char *argv[])\n{\n    try {\n        shournal_main(argc, argv);\n    } catch (const ExcCppExit& e) {\n        return e.ret();\n    }\n}\n\n"
  },
  {
    "path": "src/shournal-run/CMakeLists.txt",
    "content": "\n\nadd_executable(shournal-run\n    shournal-run.cpp\n    fifocom.cpp\n    filewatcher_shournalk.cpp\n    mark_helper.cpp\n    shournalk_ctrl.c\n)\n\ntarget_link_libraries(shournal-run\n    Qt5::Core\n    Qt5::Sql\n    lib_shournal_common\n    pthread\n)\n\n\ninstall(\n    TARGETS shournal-run\n    RUNTIME DESTINATION bin\n    PERMISSIONS\n                OWNER_READ OWNER_WRITE OWNER_EXECUTE\n                GROUP_READ GROUP_EXECUTE\n                WORLD_READ WORLD_EXECUTE\n)\n\n"
  },
  {
    "path": "src/shournal-run/fifocom.cpp",
    "content": "\n#include \"fifocom.h\"\n\n#include <linux/limits.h>\n#include <cassert>\n#include <QJsonDocument>\n#include <QJsonObject>\n\n#include \"stdiocpp.h\"\n#include \"logger.h\"\n#include \"cleanupresource.h\"\n#include \"os.h\"\n\nFifoCom::FifoCom(int fifo) :\n    m_fifofd(fifo)\n{\n    m_linebuf.reserve(PIPE_BUF);\n}\n\n/// Read a simple json message from our fifo consisting only\n/// of message-type (int >= 0) and data-field.\n/// @param data: is filled with the data field of the message\n/// @return the message type or -1.\nint FifoCom::readJsonLine(QString &data)\n{\n    if(! readLineRaw()){\n        return -1;\n    }\n    auto finallyClearLinebuf = finally([&] {\n        m_linebuf.clear();\n    });\n\n    QJsonDocument d = QJsonDocument::fromJson(m_linebuf);\n    QJsonObject rooObj = d.object();\n    int messageType = rooObj.value(\"msgType\").toInt(-1);\n    if(messageType == -1){\n        logWarning << \"invalid fifo-message received (buggy client?):\" << m_linebuf;\n        return -1;\n    }\n    data = rooObj.value(\"data\").toString();\n\n    return messageType;\n}\n\n\n/// Buffered line read from non-blocking fifo.\n/// push_back to our line buffer until we find a NEWLINE.\n/// To be compliant with O_NONBLOCK, return false on EAGAIN.\n/// @return if true, m_linebuf contains the read line.\nbool FifoCom::readLineRaw()\n{\n    bool foundNewLine = false;\n    if(m_bufIdx == 0){\n        m_bufTmp.resize(PIPE_BUF);\n        auto count = read(m_fifofd, m_bufTmp.data(), m_bufTmp.size());\n        if(count == -1){\n            if(errno == EAGAIN || errno == EWOULDBLOCK){\n                return false;\n            }\n            throw os::ExcOs(\"read from fifo failed:\");\n        }\n        m_bufTmp.resize(int(count));\n    }\n\n    for(; m_bufIdx < m_bufTmp.size(); m_bufIdx++){\n        if(m_bufTmp[m_bufIdx] == '\\n'){\n            foundNewLine = true;\n            m_bufIdx++;\n            break;\n        }\n        
m_linebuf.push_back(m_bufTmp[m_bufIdx]);\n    }\n\n    if(m_bufIdx >= m_bufTmp.size()){\n        // whole buffer consumed\n        m_bufIdx = 0;\n    }\n    if(! foundNewLine){\n        // most likely the user sent a message greater than\n        // PIPE_BUF. Get the rest on next call (do not clear\n        // buffer)\n        return false;\n    }\n    return true;\n}\n\n\n"
  },
  {
    "path": "src/shournal-run/fifocom.h",
    "content": "#pragma once\n\n#include <QString>\n#include \"qfilethrow.h\"\n\n/// Fifo-communication. Parse (json)\n/// messages sent to a given shournal-run instance\nclass FifoCom\n{\npublic:\n    FifoCom(int fifo);\n\n    int readJsonLine(QString& data);\n\nprivate:\n    bool readLineRaw();\n\n    int m_fifofd;\n    QByteArray m_bufTmp;\n    int m_bufIdx{0};\n    QByteArray m_linebuf;\n\n};\n\n"
  },
  {
    "path": "src/shournal-run/filewatcher_shournalk.cpp",
    "content": "\n#include <QTemporaryFile>\n\n#include \"filewatcher_shournalk.h\"\n\n#include <sys/resource.h>\n#include <sys/types.h>\n#include <sys/wait.h>\n#include <sys/user.h>\n#include <unistd.h>\n#include <fcntl.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <poll.h>\n#include <string.h>\n#include <errno.h>\n#include <assert.h>\n\n\n#include \"app.h\"\n#include \"cefd.h\"\n#include \"cpp_exit.h\"\n#include \"conversions.h\"\n#include \"commandinfo.h\"\n#include \"db_controller.h\"\n#include \"cleanupresource.h\"\n#include \"fdentries.h\"\n#include \"fifocom.h\"\n#include \"fileevents.h\"\n#include \"logger.h\"\n#include \"mark_helper.h\"\n#include \"os.h\"\n#include \"osutil.h\"\n#include \"qoutstream.h\"\n#include \"settings.h\"\n#include \"shournalk_ctrl.h\"\n#include \"shournal_run_common.h\"\n#include \"stdiocpp.h\"\n#include \"subprocess.h\"\n#include \"translation.h\"\n\n\nconst int PRIO_DATABASE_FLUSH = 10;\nconst pid_t INVALID_PID = std::numeric_limits<pid_t>::max();\n\n\nusing subprocess::Subprocess;\nusing std::shared_ptr;\nusing std::make_shared;\n\nusing ShournalK_ptr = Filewatcher_shournalk::ShournalK_ptr;\n\n\nstatic void handleFifoEvent(shared_ptr<FifoCom>& fifoCom,\n                            CommandInfo* cmdInfo,\n                            ShournalK_ptr& shournalk){\n    enum { FIFO_RETURN_VAL=0, FIFO_UNMARK_PID};\n\n\n    QString data;\n    for (int i=0; ; i++) {\n        int msgType = fifoCom->readJsonLine(data);\n        if(msgType == -1){\n            return;\n        }\n        switch (msgType) {\n        case FIFO_RETURN_VAL:\n            if(! qVariantTo<int>(data, &cmdInfo->returnVal)){\n                logWarning <<  qtr(\"bad return value '%1' received\").arg(data);\n            }\n            break;        \n        case FIFO_UNMARK_PID:\n            pid_t pid;\n            if(! 
qVariantTo<int>(data, &pid)){\n                logWarning <<  qtr(\"bad pid '%1' received\").arg(data);\n            } else {\n                try {\n                    shournalk->removePid(pid);\n                } catch (const ExcShournalk& ex) {\n                    logWarning << ex.what();\n                }\n            }\n            break;\n        default:\n            logWarning << \"Invalid fifo-message received:\"\n                       << msgType << \"with data\" << data;\n            break;\n        }\n    }\n\n}\n\nstatic int do_polling(ShournalK_ptr& shournalk,\n                      struct shournalk_run_result* run_result,\n                      const QByteArray& fifopath,\n                      CommandInfo* cmdInfo){\n    int fifo = -1;\n    auto finallyCloseFifo = finally([&fifo] {\n        if(fifo != -1) close(fifo);\n    });\n    // Protect client from deadlock: first delete, then\n    // close (\"finally\" reverses call order).\n    auto finallydelFifo = finally([&fifopath] {\n        if(!fifopath.isEmpty()){\n            // fail silently, as the shell integration\n            // might be faster than us\n            remove(fifopath);\n        }\n    });\n\n    shared_ptr<FifoCom> fifoCom;\n    QVector<pollfd> fds;\n\n    shournalk->preparePollOnce();\n    pollfd shournalkfd;\n    shournalkfd.fd = shournalk->kgrp()->pipe_readend;\n    shournalkfd.events = POLLIN;\n    fds.push_back(shournalkfd);\n\n    if(! 
fifopath.isEmpty()){\n        pollfd fd;\n        // open RDWR, to correctly get EAGAIN in case of no (other) writer.\n        fifo = os::open(fifopath, os::OPEN_RDWR | os::OPEN_NONBLOCK | os::OPEN_EXCL);\n        fd.fd = fifo;\n        fd.events = POLLIN;\n        fds.push_back(fd);\n\n        fifoCom = make_shared<FifoCom>(fifo);\n    }\n\n    while (1) {\n        int poll_num = poll(fds.data(), nfds_t(fds.size()), -1);\n        if (poll_num == -1) {\n            if (errno == EINTR){     // Interrupted by a signal\n                continue;            // Restart poll()\n            }\n            logCritical << \"Error during poll: \" << translation::strerror_l();\n            return errno;\n        }\n\n        // 0 only on timeout, which is infinite\n        assert(poll_num != 0);\n\n        if (fds[0].revents & POLLIN) {\n            auto read_count = os::read(shournalk->kgrp()->pipe_readend, run_result,\n                                       sizeof(struct shournalk_run_result));\n            if(read_count != sizeof(struct shournalk_run_result)){\n                logCritical << qtr(\"Received bad run-result from kernel backend: \"\n                                   \"expected %1 bytes but received %2.\")\n                               .arg(sizeof(struct shournalk_run_result)).arg(read_count);\n                return EPIPE;\n            }\n            return 0;\n        }\n        assert(fds.size() > 1);\n\n        if(fds[1].revents & POLLIN){\n            handleFifoEvent(fifoCom, cmdInfo, shournalk);\n        } else {\n            // can never happen, because we opened the\n            // fifo RDWR, so we get no events if a writer closes\n            // (which is not the case if opened RDONLY)\n            assert(false);\n        }\n    }\n}\n\n\n\nQByteArray Filewatcher_shournalk::fifopathForPid(pid_t pid)\n{\n    QByteArray fifopath = pathJoinFilename(QDir::tempPath().toUtf8(),\n                                \"shournal-run-fifo-\" + 
QByteArray::number(pid));\n    return fifopath;\n}\n\nFilewatcher_shournalk::Filewatcher_shournalk()\n{\n\n}\n\n\nvoid Filewatcher_shournalk::setArgv(char **argv, int argc)\n{\n    m_commandArgv = argv;\n    m_commandArgc = argc;\n}\n\nvoid Filewatcher_shournalk::setPid(const pid_t &pid)\n{\n    m_pid = pid;\n}\n\n\nvoid Filewatcher_shournalk::setCommandFilename(char *commandFilename)\n{\n    m_commandFilename = commandFilename;\n}\n\nvoid Filewatcher_shournalk::setStoreToDatabase(bool storeToDatabase)\n{\n    m_storeToDatabase = storeToDatabase;\n}\n\nvoid Filewatcher_shournalk::setShellSessionUUID(const QByteArray &shellSessionUUID)\n{\n    m_shellSessionUUID = shellSessionUUID;\n}\n\nvoid Filewatcher_shournalk::setForkIntoBackground(bool value)\n{\n    m_forkIntoBackground = value;\n}\n\nvoid Filewatcher_shournalk::setCmdString(const QString &cmdString)\n{\n    m_cmdString = cmdString;\n}\n\nvoid Filewatcher_shournalk::setFifoname(const QByteArray &fifoname)\n{\n    m_fifoname = fifoname;\n}\n\nvoid Filewatcher_shournalk::setPrintSummary(bool printSummary)\n{\n    m_printSummary = printSummary;\n}\n\n\nCommandInfo Filewatcher_shournalk::runExec(ShournalK_ptr &shournalk,\n                                           CEfd& toplvlEfd)\n{\n    CommandInfo cmdInfo =  CommandInfo::fromLocalEnv();\n    cmdInfo.sessionInfo.uuid = m_shellSessionUUID;\n\n    if(m_commandFilename != nullptr){\n        cmdInfo.text += QString(m_commandFilename) + \" \";\n        // TODO: rather store cmdInfo.text only from &m_commandArgv[1] in case\n        // of m_commandFilename != null ?\n    }\n    cmdInfo.text += argvToQStr(m_commandArgc, m_commandArgv);\n\n    CEfd cefd;\n    Subprocess proc;\n    proc.setWaitForSetup(false);\n    proc.setCallbackAsChild([&cefd]{\n        // Block until parent process did the setup\n        cefd.recvMsg();\n    });\n\n    const char* cmdFilename = (m_commandFilename == nullptr) ? 
m_commandArgv[0]\n            : m_commandFilename;\n    cmdInfo.startTime = QDateTime::currentDateTime();\n    proc.call(cmdFilename, m_commandArgv);\n\n    uint64_t markRet;\n    try {\n        shournalk->doMark(proc.lastPid());\n        markRet = CEfd::MSG_OK;\n    } catch (const ExcShournalk& ex) {\n        logWarning << ex.descrip();\n        markRet = CEfd::MSG_FAIL;\n    }\n    toplvlEfd.sendMsg(markRet);\n\n    cefd.sendMsg(markRet);\n    cefd.teardown();\n\n    try {\n        cmdInfo.returnVal = proc.waitFinish();\n    } catch (const os::ExcProcessExitNotNormal& ex) {\n        // return typical shell cpp_exit code\n        cmdInfo.returnVal = 128 + ex.status();\n    }\n    // do not set endTime here, but after poll for kgrp, so\n    // all background-processes finished\n    if(markRet != CEfd::MSG_OK){\n        cpp_exit(cmdInfo.returnVal);\n    }\n\n    return cmdInfo;\n}\n\nCommandInfo Filewatcher_shournalk::runMarkPid(ShournalK_ptr &shournalk, CEfd &toplvlEfd)\n{\n    assert(! m_cmdString.isEmpty());\n\n    CommandInfo cmdInfo =  CommandInfo::fromLocalEnv();\n    cmdInfo.sessionInfo.uuid = m_shellSessionUUID;\n    // Start-time will likely be overwritten later\n    cmdInfo.text = m_cmdString;\n    cmdInfo.startTime = QDateTime::currentDateTime();\n\n    try {\n        shournalk->doMark(m_pid, true);\n        toplvlEfd.sendMsg(CEfd::MSG_OK);\n    } catch (const ExcShournalk& ex) {\n        logWarning << ex.descrip();\n        toplvlEfd.sendMsg(CEfd::MSG_FAIL);\n        cpp_exit(1);\n    }\n    return cmdInfo;\n}\n\n\nvoid Filewatcher_shournalk::run()\n{\n    auto shournalk = make_shared<ShournalkControl>();\n\n    CommandInfo cmdInfo;\n\n    CEfd toplvlEfd;\n    if(m_forkIntoBackground){\n        // parent exits, child continues in new sid.\n        // We wait for child to finish setup.\n        if(os::fork() != 0){\n            logDebug << \"forking into background\";\n            auto ret = (toplvlEfd.recvMsg() == CEfd::MSG_OK) ? 
0 : 1;\n            exit(ret);\n        }\n        // child\n        os::setsid();\n    }\n\n    if(m_commandArgc != 0){\n        cmdInfo = runExec(shournalk, toplvlEfd);\n    } else {\n        cmdInfo = runMarkPid(shournalk, toplvlEfd);\n        os::mkfifo(m_fifoname, 0600);\n    }\n\n    // Everything is ready and cmdInfo.workingDirectory is\n    // also setup correctly. Try to act at least a bit like\n    // a daemon by chdir(\"/\"); e.g. to not block an unmount.\n    // Note that in case we were launched from within the\n    // shell integration the only open files should be\n    // our own logfile and the eventlog-file, both\n    // at locations which are usually never unmounted.\n    os::chdir(\"/\");\n\n    struct shournalk_run_result krun_result;\n    auto poll_result = do_polling(shournalk, &krun_result,\n                                  m_fifoname, &cmdInfo);\n    cmdInfo.endTime = QDateTime::currentDateTime();\n    if(cmdInfo.returnVal == CommandInfo::INVALID_RETURN_VAL &&\n            krun_result.selected_exitcode != SHOURNALK_INVALID_EXIT_CODE){\n        if(krun_result.selected_exitcode < 0 ||\n           krun_result.selected_exitcode > 255){\n            // the kernel module currently strips higher bits,\n            // so we should never get here.\n            logWarning << qtr(\"Unusual exit-code %1 received. Please report.\")\n                          .arg(krun_result.selected_exitcode);\n        }\n        logDebug << \"using exitcode from kernel module:\"\n                 << krun_result.selected_exitcode;\n        cmdInfo.returnVal = krun_result.selected_exitcode;\n    }\n\n    if(poll_result != 0){\n        // should never happen. 
Return failure regardless\n        // of launched command exit status\n        cpp_exit(2);\n    }\n    if(krun_result.error_nb != 0){\n        // may rarely happen if target file got lost during\n        // event processing (stored on NFS?)\n        QString msg;\n        switch (krun_result.error_nb) {\n        case EIO:\n            msg = qtr(\"%1. Maybe the target \"\n                      \"file resided on a NFS storage, which became unavailable?\")\n                    .arg(translation::strerror_l(EIO));\n            break;\n        default:\n            msg = translation::strerror_l(krun_result.error_nb);\n            break;\n        }\n\n        logWarning << qtr(\"Error %1 during file event processing \"\n                          \"(in kernel mode, most likely non-fatal): %2\")\n                      .arg(krun_result.error_nb).arg(msg);\n        // since it is nonfatal return cmd exit code\n\n    }\n    if(krun_result.lost_event_count != 0){\n        // TODO: insert cmd-id here.\n        logInfo << qtr(\"%1 events were lost\").arg(krun_result.lost_event_count);\n    }\n\n    if(m_printSummary){\n        shournal_run_common::print_summary(\n                    krun_result.w_event_count, krun_result.r_event_count,\n                    krun_result.lost_event_count,\n                    krun_result.stored_event_count,\n                    os::fstat(fileno(shournalk->tmpFileTarget())).st_size);\n    }\n\n    if(m_storeToDatabase){\n        // os::lseek(fileno_unlocked(tmpFileTarget), 0, SEEK_SET);\n        stdiocpp::fseek(shournalk->tmpFileTarget(), 0, SEEK_SET);\n        FileEvents fileEvents;\n        fileEvents.setFile(shournalk->tmpFileTarget());\n        try {\n            // Do not disturb other processes while we flush events to database\n            os::setpriority(PRIO_PROCESS, 0, PRIO_DATABASE_FLUSH);\n        } catch (const os::ExcOs&) {\n            // This may happen regularly, e.g. 
if priority was already lowered.\n            logDebug << \"Failed to set priority before database flush\";\n        }\n        try {\n            cmdInfo.idInDb = db_controller::addCommand(cmdInfo);\n            db_controller::addFileEvents(cmdInfo, fileEvents);\n        } catch (std::exception& e) {\n            // May happen, e.g. if we run out of disk space...\n            logCritical << qtr(\"Failed to store (some) file-events to disk: %1\").arg(e.what());\n        }\n    }\n    shournalk.reset();\n\n    cpp_exit(cmdInfo.returnVal);\n}\n\n\n"
  },
  {
    "path": "src/shournal-run/filewatcher_shournalk.h",
    "content": "#pragma once\n\n#include <QByteArray>\n#include <memory>\n\n#include \"commandinfo.h\"\n#include \"mark_helper.h\"\n\nextern const pid_t INVALID_PID;\n\nstruct shournalk_group;\nclass CEfd;\n\nclass Filewatcher_shournalk\n{\npublic:\n    typedef std::shared_ptr<ShournalkControl> ShournalK_ptr;\n\npublic:\n    static QByteArray fifopathForPid(pid_t pid);\n\n    Filewatcher_shournalk();\n\n    void setArgv(char **argv, int argc);\n    void setPid(const pid_t &pid);\n\n    void setCommandFilename(char *commandFilename);\n    void setStoreToDatabase(bool storeToDatabase);\n\n    void setShellSessionUUID(const QByteArray &shellSessionUUID);\n    void setForkIntoBackground(bool value);\n    void setCmdString(const QString &cmdString);\n    void setFifoname(const QByteArray &fifoname);\n    void setPrintSummary(bool printSummary);\n\n    [[noreturn]]\n    void run();\n\n\nprivate:\n    CommandInfo runExec(ShournalK_ptr& shournalk, CEfd &toplvlEfd);\n    CommandInfo runMarkPid(ShournalK_ptr& shournalk, CEfd &toplvlEfd);\n\n\n    int m_commandArgc{};\n    char* m_commandFilename{};\n    char **m_commandArgv;\n    bool m_forkIntoBackground{};\n    pid_t m_pid{INVALID_PID};\n    bool m_printSummary{false};\n    bool m_storeToDatabase{true};\n    QByteArray m_shellSessionUUID;\n    QString m_cmdString;\n    QByteArray m_fifoname;\n\n\n\n};\n\n"
  },
  {
    "path": "src/shournal-run/mark_helper.cpp",
    "content": "#include \"mark_helper.h\"\n\n#include <sys/user.h>\n#include <QHash>\n#include <QVersionNumber>\n#include <unistd.h>\n\n#include \"app.h\"\n#include \"shournalk_ctrl.h\"\n#include \"stdiocpp.h\"\n#include \"translation.h\"\n#include \"os.h\"\n#include \"logger.h\"\n\nusing std::unordered_map;\nusing std::string;\n\n\nExcShournalk::ExcShournalk(const QString &text) :\n    QExcCommon(text, false)\n{}\n\n\nusing StrLightSet = Settings::StrLightSet;\n\n\n/// Build the kernel settings according to our own\nstatic shounalk_settings buildKSettings(){\n    auto & s = Settings::instance();\n    auto & w_sets = s.writeFileSettings();\n    auto & r_sets = s.readFileSettings();\n    auto & script_sets = s.readEventScriptSettings();\n\n    struct shounalk_settings ksettings{};\n    ksettings.w_exclude_hidden = w_sets.excludeHidden;\n    ksettings.w_max_event_count = w_sets.maxEventCount;\n    ksettings.r_only_writable = r_sets.onlyWritable;\n    ksettings.r_exclude_hidden = r_sets.excludeHidden;\n    ksettings.r_max_event_count = r_sets.maxEventCount;\n    ksettings.r_store_only_writable = script_sets.onlyWritable;\n    ksettings.r_store_max_size = unsigned(script_sets.maxFileSize);\n    ksettings.r_store_max_count_of_files = uint16_t(script_sets.maxCountOfFiles);\n    ksettings.r_store_exclude_hidden = script_sets.excludeHidden;\n\n    if(s.hashSettings().hashEnable){\n        ksettings.hash_max_count_reads = s.hashSettings().hashMeta.maxCountOfReads;\n        ksettings.hash_chunksize = s.hashSettings().hashMeta.chunkSize;\n    }\n    return ksettings;\n}\n\n\n\nShournalkControl::ShournalkControl()\n{\n    m_kgrp = shournalk_init(O_CLOEXEC);\n    if(m_kgrp == nullptr){\n        throw ExcShournalk(\"init failed\");\n    }\n    shournalk_version kversion;\n    if(shournalk_read_version(&kversion) != 0){\n        throw ExcShournalk(qtr(\"Failed to read version from file %1 - %2\")\n                           .arg(shournalk_versionpath())\n                         
  .arg(translation::strerror_l(errno)));\n    }\n    if(strcmp(SHOURNAL_VERSION, kversion.ver_str) != 0){\n        // Try to avoid unnecessary unloading of the kernel module shournalk\n        // when a new version is installed:\n        auto kver = QVersionNumber::fromString(kversion.ver_str);\n        const auto minVersion = QVersionNumber{2,8};\n        if(kver < minVersion){\n            throw ExcShournalk(qtr(\"Version mismatch - kernel-module version is %1, but \"\n                                   \"min. required version of %2 is %3\")\n                               .arg(kversion.ver_str)\n                               .arg(app::SHOURNAL_RUN).arg(minVersion.toString()));\n        }\n        if(kver > app::version()){\n            logWarning <<qtr(\"The kernel-module (v%1) is newer than %2 (v%3). \"\n                             \"Continuing anyway...\")\n                         .arg(kversion.ver_str)\n                         .arg(app::SHOURNAL_RUN).arg(app::version().toString());\n\n        }\n    }\n\n    m_tmpFileTarget = stdiocpp::tmpfile(O_NOATIME); // tmpfile auto deletes..\n    if(m_tmpFileTarget == nullptr){\n        throw ExcShournalk(qtr(\"Failed to open temporary event target-file: %1\")\n                           .arg(translation::strerror_l(errno)));\n    }\n    int fd = fileno_unlocked(m_tmpFileTarget);\n    shournalk_set_target_fd(m_kgrp, fd);\n}\n\nShournalkControl::~ShournalkControl()\n{\n    shournalk_release(m_kgrp);\n    fclose(m_tmpFileTarget);\n}\n\n\n/// @throws ExcShournalk\nvoid ShournalkControl::doMark(pid_t pid, bool collectExitcode)\n{\n    try {\n        auto ksettings = buildKSettings();\n        shournalk_set_settings(m_kgrp, &ksettings);\n\n        int ret;\n        int flags = SHOURNALK_MARK_ADD;\n        if(collectExitcode){\n            flags |= SHOURNALK_MARK_COLLECT_EXITCODE;\n        }\n\n        if((ret = shournalk_filter_pid(m_kgrp, flags, pid)) != 0){\n            throw 
ExcShournalk(translation::strerror_l(ret));\n        }\n        auto & s = Settings::instance();\n\n        const auto & all_excl = s.getMountIgnorePaths();\n\n        const auto & w_incl = s.writeFileSettings().includePaths->allPaths();\n        const auto & w_excl = s.writeFileSettings().excludePaths->allPaths();\n        const auto & r_incl = s.readFileSettings().includePaths->allPaths();\n        const auto & r_excl = s.readFileSettings().excludePaths->allPaths();\n        const auto & script_incl = s.readEventScriptSettings().includePaths->allPaths();\n        const auto & script_excl = s.readEventScriptSettings().excludePaths->allPaths();\n\n\n        markPaths(w_incl, SHOURNALK_MARK_W_INCL );\n        markPaths(w_excl, SHOURNALK_MARK_W_EXCL );\n        markPaths(all_excl, SHOURNALK_MARK_W_EXCL );\n\n        if(s.readFileSettings().enable){\n            markPaths(r_incl, SHOURNALK_MARK_R_INCL);\n            markPaths(r_excl, SHOURNALK_MARK_R_EXCL);\n            markPaths(all_excl, SHOURNALK_MARK_R_EXCL);\n        }\n        if(s.readEventScriptSettings().enable){\n            markPaths(script_incl, SHOURNALK_MARK_SCRIPT_INCL);\n            markPaths(script_excl, SHOURNALK_MARK_SCRIPT_EXCL);\n            markPaths(all_excl, SHOURNALK_MARK_SCRIPT_EXCL);\n            const auto & exts = s.readEventScriptSettings().includeExtensions;\n            if(exts.size()){\n                markExtensions(exts, SHOURNALK_MARK_SCRIPT_EXTS);\n            }\n        }\n\n        if((ret = shournalk_commit(m_kgrp)) != 0){\n            throw ExcShournalk(qtr(\"failed to commit event target - %1\")\n                               .arg(translation::strerror_l(ret)));\n        }\n    } catch (ExcShournalk& ex) {\n        throw ExcShournalk(qtr(\"Failed to mark target process with pid \"\n                           \"%1 for observation - %2\")\n                       .arg(pid).arg(ex.descrip()));\n    }\n}\n\nvoid ShournalkControl::preparePollOnce()\n{\n    
if(shournalk_prepare_poll_ONCE(m_kgrp)){\n        throw ExcShournalk(qtr(\"failed to prepare poll\"));\n    }\n}\n\nvoid ShournalkControl::removePid(pid_t pid)\n{\n    int ret;\n    if((ret = shournalk_filter_pid(m_kgrp, SHOURNALK_MARK_REMOVE, pid)) != 0){\n        throw ExcShournalk(\n                    qtr(\"Failed to unmark pid for observation: %1\").arg(translation::strerror_l(ret))\n                    );\n    }\n}\n\n\nFILE *ShournalkControl::tmpFileTarget() const\n{\n    return m_tmpFileTarget;\n}\n\nshournalk_group *ShournalkControl::kgrp() const\n{\n    return m_kgrp;\n}\n\n\n\nvoid ShournalkControl::markPaths(const Settings::StrLightSet& paths, int path_tpye){\n    int ret;\n    for(const auto& p : paths){\n        if((ret = shournalk_filter_string(m_kgrp,\n                                        SHOURNALK_MARK_ADD,\n                                        path_tpye,\n                                        p.c_str())) != 0 ){\n            throw ExcShournalk(qtr(\"failed to mark path \"\n                                   \"%1 - %2\").arg(p.c_str())\n                                             .arg(translation::strerror_l(ret)));\n        }\n    }\n}\n\n\nvoid ShournalkControl::markExtensions\n(const Settings::StrLightSet& extensions, int ext_type){\n    StrLight extBuf;\n    const StrLight::size_type BUF_SIZE = 4096;\n    extBuf.reserve(BUF_SIZE);\n    for(const auto & str : extensions){\n        // add extensions to a single long string\n        // separated by slash. Flush, if bigger\n        // than BUF_SIZE (unlikely)\n        if(extBuf.size() + str.size() + 1 > BUF_SIZE){\n            doMarkExtensions(extBuf, ext_type);\n        }\n        extBuf += str + '/';\n    }\n    if(! 
extBuf.empty()){\n        doMarkExtensions(extBuf, ext_type);\n    }\n}\n\n\nvoid ShournalkControl::doMarkExtensions\n(const StrLight& extensions, int ext_type){\n    int ret;\n    if((ret = shournalk_filter_string(m_kgrp,\n                                    SHOURNALK_MARK_ADD,\n                                    ext_type,\n                                    extensions.c_str())) != 0 ){\n        throw ExcShournalk(qtr(\"failed to mark extensions - %1 \"\n                               \"- extensions-string: %2\")\n                           .arg(translation::strerror_l(ret))\n                           .arg(extensions.c_str()));\n    }\n}\n"
  },
  {
    "path": "src/shournal-run/mark_helper.h",
    "content": "#pragma once\n\n#include <stdio.h>\n\n#include \"exccommon.h\"\n#include \"settings.h\"\n\n\nstruct shournalk_group;\n\nclass ExcShournalk : public QExcCommon\n{\npublic:\n    ExcShournalk(const QString & text);\n};\n\n/// c++ interface for shournal's kernel module.\nclass ShournalkControl {\npublic:\n    ShournalkControl();\n    ~ShournalkControl();\n\n    void doMark(pid_t pid, bool collectExitcode=false);\n    void preparePollOnce();\n\n    void removePid(pid_t pid);\n\n    FILE *tmpFileTarget() const;\n    shournalk_group *kgrp() const;\n\nprivate:\n    Q_DISABLE_COPY(ShournalkControl)\n    struct shournalk_group* m_kgrp;\n    FILE* m_tmpFileTarget;\n\n    void markPaths(const Settings::StrLightSet& paths, int path_tpye);\n    void markExtensions(const Settings::StrLightSet& extensions, int ext_type);\n    void doMarkExtensions(const StrLight &extensions, int ext_type);\n};\n"
  },
  {
    "path": "src/shournal-run/shournal-run.cpp",
    "content": "#include <csignal>\n#include <fcntl.h>\n#include <cassert>\n\n#include <QCoreApplication>\n#include <QVarLengthArray>\n#include <QDir>\n\n#include \"qoptargparse.h\"\n#include \"qoptvarlenarg.h\"\n#include \"excoptargparse.h\"\n#include \"os.h\"\n#include \"excos.h\"\n#include \"filewatcher_shournalk.h\"\n#include \"logger.h\"\n#include \"fdcommunication.h\"\n\n\n#include \"exccfg.h\"\n#include \"settings.h\"\n#include \"util.h\"\n#include \"qoutstream.h\"\n#include \"util.h\"\n#include \"translation.h\"\n#include \"app.h\"\n#include \"qexcdatabase.h\"\n#include \"cpp_exit.h\"\n#include \"db_connection.h\"\n#include \"storedfiles.h\"\n#include \"socket_message.h\"\n\nusing fdcommunication::SocketCommunication;\nusing socket_message::E_SocketMsg;\n\n\n\n#include <unistd.h>\n#include <fcntl.h>\n#include <stdlib.h>\n#include <sys/types.h>\n#include <sys/wait.h>\n#include <sys/eventfd.h>\n#include <stdio.h>\n#include <poll.h>\n#include <string.h>\n#include <errno.h>\n#include <assert.h>\n\n\n#include \"os.h\"\n#include \"osutil.h\"\n#include \"fdentries.h\"\n#include \"qoutstream.h\"\n#include \"shournalk_ctrl.h\"\n#include \"shournal_run_common.h\"\n\nusing namespace shournal_run_common;\n\n/// Uncaught exception handler\nstatic void onterminate() {\n    try {\n        auto unknown = std::current_exception();\n        if (unknown) {\n            std::rethrow_exception(unknown);\n        }\n    } catch (const std::exception& e) {\n        logCritical << e.what() << \"\\n\";\n    } catch (...) 
{\n        logCritical << \"unknown exception occurred\\n\";\n    }\n}\n\nstatic void closeFds(){\n    // close all file descriptors except stderr and\n    // a potential integration test descriptor.\n    auto keepFds = std::unordered_set<int>{2, app::findIntegrationTestFd()};\n    for(int fd : osutil::FdEntries()){\n        if(keepFds.find(fd) == keepFds.end()){\n            os::close(fd);\n        }\n    }\n}\n\n\nstatic int shournal_run_main(int argc, char *argv[])\n{\n    // Since we are waiting for other processes to finish, ignore typical\n    // signals.\n    osutil::setInertSighandler(os::catchableTermSignals());\n    // Using app::SHOURNAL for several common paths (database, config) used\n    // by QStandardPaths but app::CURRENT_NAME for others (log-filename)\n    app::setupNameAndVersion(app::SHOURNAL_RUN);\n\n    if(! translation::init()){\n        logWarning << \"Failed to initialize translation\";\n    }\n\n    logger::setup(app::CURRENT_NAME);\n\n    std::set_terminate(onterminate);\n\n    if(! shournal_common_init()){\n        logCritical << qtr(\"Fatal error: failed to initialize custom Qt conversion functions\");\n        cpp_exit(1);\n    }\n\n    // ignore first arg (command to this app)\n    --argc;\n    ++argv;\n    QOptArgParse parser;\n    parser.setHelpIntroduction(qtr(\"Observation backend for <%1> based \"\n                                   \"on a custom kernel module.\"\n                                      ).arg(app::SHOURNAL) + \"\\n\");\n    QOptArg argVersion(\"v\", \"version\", qtr(\"Display version\"), false);\n    parser.addArg(&argVersion);\n\n    QOptArg argPid(\"\", \"pid\",\n                         qtr(\"Mark the process with given pid for observation.\"));\n    parser.addArg(&argPid);\n\n\n    QOptArg argPrintFifopath(\"\", \"print-fifopath-for-pid\",\n                         qtr(\"Print the fifo path for a given pid \"\n                             \"and exit. 
A fifo is \"\n                             \"created, if argument %1 is given.\")\n                             .arg(argPid.name()));\n    parser.addArg(&argPrintFifopath);\n\n    QOptArg argFifoname(\"\", \"fifoname\",\n                         qtr(\"If arg %1 is also parsed, create \"\n                             \"the fifo under the given filename\").arg(argPid.name()));\n    parser.addArg(&argFifoname);\n    argFifoname.addRequiredArg(&argPid);\n\n    QOptArg argTmpDir(\"\", \"tmpdir\", \"NOT USED\");\n    // Interface compatibility with shournal-run-fanotify.\n    // Not being suid we can simply use $TMPDIR.\n    argTmpDir.setInternalOnly(true);\n    parser.addArg(&argTmpDir);\n\n    QOptVarLenArg argEnv(\"\", \"env\", \"NOT USED\");\n    // Interface compatibility with shournal-run-fanotify.\n    argEnv.setInternalOnly(true);\n    parser.addArg(&argEnv);\n\n    QOptArg argFork(\"\", \"fork\",\n                         qtr(\"Fork into background immediatly after marking \"\n                             \"a pid.\"), false);\n    parser.addArg(&argFork);\n\n    QOptArg argCmdString(\"\", \"cmd-string\",\n                         qtr(\"Associate the recording of a process with the \"\n                             \"given command string. 
Only used, if arg %1 \"\n                             \"is given\").arg(argPid.name()));\n    argCmdString.addRequiredArg(&argPid);\n    parser.addArg(&argCmdString);\n    argPid.addRequiredArg(&argCmdString);\n\n    QOptArg argCloseFds(\"\", \"close-fds\",\n                         qtr(\"Advanced option: closes file \"\n                             \"descriptors except stderr.\"), false);\n    parser.addArg(&argCloseFds);\n\n\n    QOptArg argPrintSummary(\"\", \"print-summary\",\n                         qtr(\"Print a short summary after \"\n                             \"event processing finished.\"), false);\n    parser.addArg(&argPrintSummary);\n\n    QOptArg argShournalkIsLoaded(\"\", \"shournalk-is-loaded\",\n                         qtr(\"If shournal's kernel module is loaded, \"\n                             \"exit with zero, else nonzero\"), false);\n    parser.addArg(&argShournalkIsLoaded);\n\n\n    QOptArg argExec(\"e\", \"exec\", qtr(\"Execute and observe the passed program \"\n                                     \"and its arguments (this argument has to be last).\"),\n                    false);\n    argExec.setFinalizeFlag(true);\n    parser.addArg(&argExec);\n\n    QOptArg argExecFilename(\"\", \"exec-filename\", qtr(\"This is an advanced option. \"\n                                                     \"In most cases the first argument of a \"\n                                                     \"program is the program name. For \"\n                                                     \"example for login-shells this does \"\n                                                     \"not have to be the case. 
If this \"\n                                                     \"argument is provided, that filename \"\n                                                     \"is used instead of argv[0]\"));\n    parser.addArg(&argExecFilename);\n    argExecFilename.addRequiredArg(&argExec);\n\n    QOptArg argVerbosity(\"\", \"verbosity\",\n                         qtr(\"How much shall be printed to stderr. Note that \"\n                             \"for 'dbg' shournal-run must not be a 'Release'-build.\"));\n    argVerbosity.setAllowedOptions(app::VERBOSITIES);\n    parser.addArg(&argVerbosity);\n\n    QOptArg argShellSessionUUID(\"\", \"shell-session-uuid\", qtr(\"uuid as base64-encoded string\"));\n    argShellSessionUUID.setInternalOnly(true);\n    parser.addArg(&argShellSessionUUID);\n\n    QOptArg argMakeSessionUUID(\"\", \"make-session-uuid\", qtr(\"print a unique uuid to stdout and \"\n                                                            \"exit\"), false);\n    argMakeSessionUUID.setInternalOnly(true);\n    parser.addArg(&argMakeSessionUUID);\n\n\n    QOptArg argNoDb(\"\", \"no-db\", qtr(\"For debug purposes: do not write to \"\n                                     \"database after event processing\"), false);\n    parser.addArg(&argNoDb);\n\n    auto argCfgDir = mkarg_cfgdir();\n    parser.addArg(&argCfgDir);\n    auto argDataDir = mkarg_datadir();\n    parser.addArg(&argDataDir);\n\n    try {\n        auto & sets = Settings::instance();\n        parser.parse(argc, argv);\n        if(argCfgDir.wasParsed()){\n            sets.setUserCfgDir(argCfgDir.getValue<QString>());\n        }\n        if(argDataDir.wasParsed()){\n            sets.setUserDataDir(argDataDir.getValue<QString>());\n        }\n\n        if(argCloseFds.wasParsed()){\n            // Do this early before we open fds ourselves.\n            closeFds();\n        }\n\n        if(argVerbosity.wasParsed()){\n            QByteArray verbosity = argVerbosity.getOptions(1).first().toLocal8Bit();\n           
 logger::setVerbosityLevel(verbosity.constData());\n        } else {\n            logger::setVerbosityLevel(QtMsgType::QtWarningMsg);\n        }\n\n        if(argPrintFifopath.wasParsed()){\n            QOut() << Filewatcher_shournalk::fifopathForPid(\n                          argPrintFifopath.getValue<pid_t>()\n                          ) << \"\\n\";\n            cpp_exit(0);\n        }\n\n        if(argShournalkIsLoaded.wasParsed()){\n            cpp_exit(! shournalk_module_is_loaded());\n        }\n\n        if(argMakeSessionUUID.wasParsed()){\n            bool madeSafe;\n            auto uuid = make_uuid(&madeSafe);\n            if(! madeSafe){\n                logInfo << qtr(\"session uuid not created 'safe'. Is the uuidd-daemon running?\");\n            }\n            QOut() << uuid.toBase64() << \"\\n\";\n            cpp_exit(0);\n        }\n\n        if(argExec.wasParsed() &&\n                argPid.wasParsed() ) {\n            QIErr() << qtr(\"%1 and %2 are mutually exclusive\").arg(argExec.name(), argPid.name());\n            cpp_exit(1);\n        }\n\n        if(argVersion.wasParsed()){\n            QOut() << app::SHOURNAL_RUN << qtr(\" version \") << app::version().toString() << \"\\n\";\n            cpp_exit(0);\n        }\n\n        try {\n            logger::enableLogToFile(app::SHOURNAL_RUN);\n            sets.load();\n            StoredFiles::mkpath();\n        } catch(const qsimplecfg::ExcCfg & ex){\n            QIErr() << qtr(\"Failed to load config file: \") << ex.descrip();\n            cpp_exit(1);\n        } catch(const QExcDatabase & ex){\n            QIErr() << qtr(\"Database-operation failed: \") << ex.descrip();\n            cpp_exit(1);\n        }  catch (const QExcIo& ex){\n            logCritical << qtr(\"IO-operation failed: \") << ex.descrip();\n            cpp_exit(1);\n        } catch (const os::ExcOs& ex){\n            logCritical << ex.what();\n            cpp_exit(1);\n        }\n\n        Filewatcher_shournalk fwatcher;\n\n    
    if(argExecFilename.wasParsed()){\n            // fwatcher command-filename must otherwise be null, to allow\n            // for correct storing of command in db (no duplicate first arg\n            // if not necessary!)\n            fwatcher.setCommandFilename(argExecFilename.vals().argv[0]);\n        }\n\n        fwatcher.setForkIntoBackground(argFork.wasParsed());\n        fwatcher.setPrintSummary(argPrintSummary.wasParsed());\n        fwatcher.setStoreToDatabase(! argNoDb.wasParsed());\n\n        if(argShellSessionUUID.wasParsed()){\n            fwatcher.setShellSessionUUID(\n                        QByteArray::fromBase64(argShellSessionUUID.getValue<QByteArray>()));\n        }\n        if(argExec.wasParsed()){\n            auto externCmd = parser.rest();\n            fwatcher.setArgv(externCmd.argv, externCmd.len);\n            fwatcher.run();\n        }\n        if(argPid.wasParsed()){\n            fwatcher.setPid(argPid.getValue<pid_t>(INVALID_PID));\n            fwatcher.setFifoname(argFifoname.getValue<QByteArray>(\n                                 Filewatcher_shournalk::fifopathForPid(getpid())\n                                     ));\n            assert(argCmdString.wasParsed());\n            fwatcher.setCmdString(argCmdString.getValue<QString>());\n            fwatcher.run();\n        }\n\n        if(parser.rest().len != 0){\n            QIErr() << qtr(\"Invalid parameters passed: %1.\\n\"\n                           \"Show help with --help\").\n                       arg( argvToQStr(parser.rest().len, parser.rest().argv));\n            cpp_exit(1);\n        }\n        QIErr() << \"No action specified\";\n\n    } catch (const ExcOptArgParse & ex) {\n        QIErr() << qtr(\"Commandline seems to be erroneous:\")\n                << ex.descrip();\n    }\n    cpp_exit(1);\n\n\n}\n\nint main(int argc, char *argv[])\n{\n    try {\n        shournal_run_main(argc, argv);\n    } catch (const ExcCppExit& e) {\n        return e.ret();\n    }\n}\n\n\n\n"
  },
  {
    "path": "src/shournal-run/shournalk_ctrl.c",
    "content": "#ifndef _GNU_SOURCE\n#define _GNU_SOURCE\n#endif\n\n#include <fcntl.h>\n#include <unistd.h>\n#include <stdio.h>\n#include <stdint.h>\n#include <sys/stat.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <stdbool.h>\n#include <assert.h>\n\n#include \"shournalk_ctrl.h\"\n\n#define SHOURNALK_CTRL_PATH \"/sys/kernel/shournalk_root/shournalk_ctrl\"\n\n// Unprivileged docker containers have a read-only sysfs filesystem.\n// So we look at below path where the hosts shournalk-control\n// might be bind-mounted (by the user) inside the container.\n#define SHOURNALK_CTRL_DOCKER_PATH \"/tmp/shournalk-sysfs/shournalk_ctrl\"\n\n\n#define SHOURNALK_MARK_PATH SHOURNALK_CTRL_PATH \"/mark\"\n#define SHOURNALK_DOCKER_MARK_PATH SHOURNALK_CTRL_DOCKER_PATH \"/mark\"\n\n#define SHOURNALK_VERSION_PATH SHOURNALK_CTRL_PATH  \"/version\"\n\nstatic bool __file_exists(const char* filename){\n    struct stat buffer;\n    return (stat (filename, &buffer) == 0);\n}\n\nstatic int __open_sysfs_mark(void){\n    const int o_flags = O_WRONLY | O_CLOEXEC;\n\n    int fd = open(SHOURNALK_MARK_PATH, o_flags);\n    if(fd >= 0)\n        return fd;\n\n    if(errno != EROFS){\n        perror(\"Failed to open shournalk's sysfs-interface at \"\n               SHOURNALK_MARK_PATH \". Is the kernel module loaded? modprobe shournalk\");\n        return -1;\n    }\n\n    // likely inside docker or another container - try alternative path\n    fd = open(SHOURNALK_DOCKER_MARK_PATH, o_flags);\n    if(fd >= 0)\n        return fd;\n\n    perror(\"Failed to open shournalk's sysfs-interface at\\n\"\n           SHOURNALK_MARK_PATH \" and\\n\" SHOURNALK_DOCKER_MARK_PATH\n           \". Is the kernel module loaded? 
modprobe shournalk\");\n    return -1;\n}\n\n\nstatic int __shournalk_filter_common(struct shournalk_group* grp, unsigned int flags,\n                                     int action)\n{\n    grp->__mark_struct.flags = flags;\n    grp->__mark_struct.action = action;\n\n    ssize_t writeRet = write(grp->__sysfs_mark_fd,\n                             &grp->__mark_struct,\n                             sizeof (grp->__mark_struct));\n    if(writeRet != sizeof (grp->__mark_struct)){\n        return errno;\n    }\n    return 0;\n}\n\nbool shournalk_module_is_loaded(void){\n    return __file_exists(shournalk_versionpath());\n}\n\nconst char* shournalk_versionpath(void){\n    return SHOURNALK_VERSION_PATH;\n}\n\n\n\n/// @param flags: for creating the pipe, e.g. O_NONBLOCK\nstruct shournalk_group*\nshournalk_init(unsigned int flags){\n    struct shournalk_group* grp;\n    int sysfs_fd;\n\n    int pip_descr[2];\n    if(pipe2(pip_descr, flags) == -1){\n        perror(\"pipe\");\n        return NULL;\n    }\n\n    sysfs_fd = __open_sysfs_mark();\n    if(sysfs_fd < 0){\n        close(pip_descr[0]);\n        close(pip_descr[1]);\n        return NULL;\n    }\n    grp = (struct shournalk_group*)calloc(1, sizeof (struct shournalk_group));\n    if(grp == NULL){\n        close(pip_descr[0]);\n        close(pip_descr[1]);\n        return NULL;\n    }\n    grp->__mark_struct.target_fd = -1;\n    grp->pipe_readend = pip_descr[0];\n    grp->__mark_struct.pipe_fd = pip_descr[1];\n    grp->__sysfs_mark_fd = sysfs_fd;\n    return grp;\n}\n\nvoid shournalk_release(struct shournalk_group* grp){\n    if(close(grp->pipe_readend) == -1){\n        perror(\"shournalk_release close pipe readend\");\n    }\n    if(close(grp->__sysfs_mark_fd) == -1){\n         perror(\"shournalk_release sysfs_mark_fd\");\n    }\n    if(grp->__mark_struct.pipe_fd != -1){\n        if(close(grp->__mark_struct.pipe_fd)){\n            perror(\"shournalk_release close pipe writeend\");\n        }\n    }\n    
free(grp);\n}\n\nvoid shournalk_set_target_fd(struct shournalk_group* grp, int fd){\n    grp->__mark_struct.target_fd = fd;\n}\n\n\nvoid shournalk_set_settings(struct shournalk_group* grp,\n                            struct shounalk_settings* settings){\n    grp->__mark_struct.settings = *settings;\n}\n\n\n/// Make sure to set target_fd and settings beforehand\nint shournalk_filter_pid(struct shournalk_group* grp, unsigned int flags, pid_t pid)\n{\n    grp->__mark_struct.pid = pid;\n    return __shournalk_filter_common(grp, flags, SHOURNALK_MARK_PID);\n}\n\n/// @param str_tpye: one of SHOURNALK_MARK_*\n///                             R_INCL, R_EXCL\n///                             W_INCL, W_EXCL\n///                             SCRIPT_INCL, SCRIPT_EXCL, SCRIPT_EXTS\nint shournalk_filter_string(struct shournalk_group* grp, unsigned int flags,\n                          int str_tpye, const char* str){\n    grp->__mark_struct.data = str;\n    return __shournalk_filter_common(grp, flags, str_tpye);\n}\n\nint shournalk_commit(struct shournalk_group* grp){\n    return __shournalk_filter_common(grp, SHOURNALK_MARK_COMMIT, 0);\n}\n\n\n/// cĺose the pipe write end to avoid deadlock in poll.\n/// warning - may only be called once per shournalk-group.\n/// After that you are not allowed to call other functions but\n/// shournalk_release\nint shournalk_prepare_poll_ONCE(struct shournalk_group* grp){\n    assert(grp->__mark_struct.pipe_fd != -1);\n    if(close(grp->__mark_struct.pipe_fd)){\n        perror(\"shournalk_prepare_poll_ONCE close pipe writeend\");\n        return errno;\n    }\n    grp->__mark_struct.pipe_fd = -1;\n    return 0;\n}\n\n/// Read the version-string from sysfs, returning 0 on success,\n/// else nonzero with errno set.\nint shournalk_read_version(struct shournalk_version* ver){\n    int fd;\n    ssize_t ret = 0;\n\n    fd = open(SHOURNALK_VERSION_PATH, O_RDONLY);\n    if(fd < 0)\n        return fd;\n    ret = read(fd, ver->ver_str, sizeof 
(ver->ver_str));\n    if(ret < 0) goto out;\n    if(ret == sizeof (ver->ver_str)){\n        fprintf(stderr, \"shournalk_read_version - \"\n                        \"too large version string read (bug?)\\n\");\n        errno = EFBIG;\n        ret = -1;\n        goto out;\n    }\n    ver->ver_str[ret] = '\\0';\n    ret = 0; // success\n\nout:\n    close(fd);\n    return (int)ret;\n}\n\n\n"
  },
  {
    "path": "src/shournal-run/shournalk_ctrl.h",
    "content": "/* Generic c-interface to control shournal's kernel module from\n * userspace. Besides the kernel module (header) it depends only on\n * system headers.\n */\n\n#pragma once\n\n#include <fcntl.h>\n\n#include \"shournalk_user.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n\nstruct shournalk_group {\n    int pipe_readend; /* kernel notifies us when done */\n    int __sysfs_mark_fd;\n    struct shournalk_mark_struct __mark_struct;\n};\n\nstruct shournalk_version {\n    char ver_str[256];\n};\n\nbool shournalk_module_is_loaded(void);\nconst char* shournalk_versionpath(void);\n\n\nstruct shournalk_group* shournalk_init(unsigned int flags);\nvoid shournalk_release(struct shournalk_group* grp);\n\n\nvoid shournalk_set_target_fd(struct shournalk_group* grp, int fd);\nvoid shournalk_set_settings(struct shournalk_group* grp,\n                            struct shounalk_settings* settings);\n\n\nint shournalk_filter_pid(struct shournalk_group* grp, unsigned int flags, pid_t pid);\nint shournalk_filter_string(struct shournalk_group* grp, unsigned int flags,\n                           int str_tpye, const char* str);\nint shournalk_commit(struct shournalk_group* grp);\n\nint shournalk_prepare_poll_ONCE(struct shournalk_group* grp);\n\nint shournalk_read_version(struct shournalk_version* ver);\n\n\n#ifdef __cplusplus\n}\n#endif\n\n"
  },
  {
    "path": "src/shournal-run-fanotify/CMakeLists.txt",
    "content": "\n# This program is somewhat performance-critical,\n# so disable exports. FIXME: benchmark it\nIF(CMAKE_BUILD_TYPE MATCHES Release)\n    SET (CMAKE_ENABLE_EXPORTS FALSE)\nENDIF()\n\n\n\ninclude_directories(\n    ../common\n    ../common/qoptargparse\n    ../common/database\n    ../common/qsimplecfg\n    ../common/oscpp\n    ../common/database\n    ../common/qsqlthrow\n    ../../\n    )\n\n\nadd_executable(shournal-run-fanotify\n    shournal-run-fanotify.cpp\n    fanotify_controller.cpp\n    filewatcher_fan.cpp\n    mount_controller.cpp\n    msenter.cpp\n    orig_mountspace_process.cpp\n    )\n\ntarget_link_libraries(shournal-run-fanotify\n    lib_shournal_common\n    pthread\n    uuid\n    cap # capabilites\n    )\n\n\ninstall(\n    TARGETS shournal-run-fanotify\n    RUNTIME DESTINATION bin\n    PERMISSIONS SETUID\n                OWNER_READ OWNER_WRITE OWNER_EXECUTE\n                GROUP_READ GROUP_EXECUTE\n                WORLD_READ WORLD_EXECUTE\n)\n"
  },
  {
    "path": "src/shournal-run-fanotify/fanotify_controller.cpp",
    "content": "\n#ifndef _GNU_SOURCE\n    #define _GNU_SOURCE // Needed to get O_LARGEFILE definition\n#endif\n\n#include <sys/fanotify.h>\n#include <poll.h>\n#include <fcntl.h>\n#include <unistd.h>\n#include <sys/stat.h>\n#include <climits>\n#include <iostream>\n#include <cstring>\n#include <cstddef>\n#include <sys/mount.h>\n#include <cstring>\n#include <array>\n\n\n#include \"fanotify_controller.h\"\n#include \"util.h\"\n#include \"fileeventhandler.h\"\n#include \"excos.h\"\n#include \"cxxhash.h\"\n#include \"os.h\"\n#include \"osutil.h\"\n#include \"settings.h\"\n#include \"logger.h\"\n#include \"translation.h\"\n#include \"mount_controller.h\"\n#include \"db_connection.h\"\n#include \"storedfiles.h\"\n\n\nusing ExcCXXHash = CXXHash::ExcCXXHash;\nusing os::ExcOs;\nusing StringSet = Settings::StringSet;\n\n// Max number of fanotify events which can be consumed\n// with a single read(2). Note that the max number of open\n// fd's is also adjusted, however, since we already\n// have some other fd's open, the actual max number of\n// events will be a little lower.\nconst int FANOTIFY_MAX_EVENT_COUNT = 4096;\n\nnamespace  {\n\nQString fanotifyEventMaskToStr(uint64_t m){\n    QString action;\n    if(m & FAN_MODIFY){\n        action += \"modified \";\n    }\n    if(m & FAN_CLOSE_WRITE){\n        action += \"closed_write \";\n    }\n    if(m & FAN_CLOSE_NOWRITE){\n        action += \"closed_nowrite\";\n    }\n    if(m & FAN_OPEN){\n        action += \"open\";\n    }\n\n    if(action.isEmpty()){\n        action = \"unhandled event: \" + QString::number(m, 16);\n    }\n    return action;\n}\n\nbool fanotifyMarkWrapOnInit(int fanFd, uint64_t mask, const std::string& path_){\n    if (fanotify_mark(fanFd, FAN_MARK_ADD | FAN_MARK_MOUNT,\n                      mask, AT_FDCWD,\n                      path_.c_str()) == -1) {\n\n        const auto msg = qtr(\"fanotify_mark: failed to add path %1. 
\"\n                             \"It will not be observed: %2 failed - %3(%4)\")\n                         .arg(path_.c_str(), fanotifyEventMaskToStr(mask),\n                              translation::strerror_l()).arg(errno);\n        if(Settings::instance().getMountIgnoreNoPerm() && errno == EACCES){\n            logDebug << msg;\n        } else {\n            logWarning << msg;\n        }\n        return false;\n    }\n    logDebug << \"fanotify_mark\" << fanotifyEventMaskToStr(mask) << path_;\n    return true;\n\n}\n\n/// Fill param result with parentPaths and all sub-mountpaths, that is,\n/// all paths in allMountpaths, which are a sub-path of any parentPath,\n/// are added.\nvoid addPathsAndSubMountPaths(const std::shared_ptr<PathTree>& parentPaths,\n                              const std::shared_ptr<PathTree>& allMountpaths,\n                              StringSet& result){\n    for(const auto& p : *parentPaths){\n        // maybe_todo: avoid needless conversion\n        result.insert(p.c_str());\n        for(auto mountIt = allMountpaths->subpathIter(p); mountIt != allMountpaths->end(); ++mountIt){\n            // maybe_todo: avoid needless conversion\n            result.insert((*mountIt).constData());\n        }\n    }\n}\n\n\n} // anonymous namespace\n\n\n\n\n/// Initialize fanotify's filedescriptor (requires root)\n/// @throws ExcOs\nFanotifyController::FanotifyController() :\n    m_fanFd(-1),\n    m_markLimitReached(false),\n    m_ReadEventsUnregistered(false),\n    r_wCfg(Settings::instance().writeFileSettings()),\n    r_rCfg(Settings::instance().readFileSettings()),\n    r_scriptCfg(Settings::instance().readEventScriptSettings())\n{\n    // Create the file descriptor for accessing the fanotify API\n    m_fanFd = fanotify_init(FAN_CLOEXEC | FAN_NONBLOCK,\n                       O_RDONLY | O_LARGEFILE | O_CLOEXEC | O_NOATIME);\n\n    if (m_fanFd == -1) {\n        throw ExcOs(\"fanotify_init failed:\");\n    
}\n\n\n}\n\nFanotifyController::~FanotifyController(){\n    try {\n        os::close(m_fanFd);\n    } catch (const std::exception& e) {\n        logCritical << __func__ << e.what();\n    }\n}\n\n\n\nint FanotifyController::fanFd() const\n{\n    return m_fanFd;\n}\n\nint FanotifyController::getFanotifyMaxEventCount() const\n{\n    return FANOTIFY_MAX_EVENT_COUNT;\n}\n\nvoid FanotifyController::setFileEventHandler(std::shared_ptr<FileEventHandler> & feventHandler)\n{\n    m_feventHandler = feventHandler;\n}\n\n\n/// fanotify_mark all paths of interest, that is all paths\n/// which shall be observed for read- or write-events.\n/// We unshared the mount-namespace before, so perform the\n/// mark by using mount-points.\n/// Also collect all mount-points, which are submounts of\n/// a desired path (if / shall be observed, e.g.\n/// mark filesystems under /media as well).\n/// Note that on marking a path, parent directories are possibly marked\n/// as well, if the mount-point lays above it.\n/// ( if mountpoint is /home and the dir /home/user/foo shall be observed,\n///   fanotify_mark marks /home, thus events occurring in /home and /home/user\n///   are also reported [and need to be filtered out later]).\nvoid FanotifyController::setupPaths(){\n    m_ReadEventsUnregistered = false;\n    auto allMounts = mountController::generatelMountTree();\n    StringSet allWritePaths;\n    addPathsAndSubMountPaths(r_wCfg.includePaths,\n                             allMounts, allWritePaths);\n\n    StringSet allReadPaths;\n    // Script files (which shall be stored) and 'normal' read files are treated differently later -\n    // first mark unified paths from both categories for fanotify read-events.\n    if(r_rCfg.enable){\n        addPathsAndSubMountPaths(r_rCfg.includePaths,\n                                 allMounts, allReadPaths);\n    }\n    if(r_scriptCfg.enable){\n        addPathsAndSubMountPaths(r_scriptCfg.includePaths,\n                                 allMounts, 
allReadPaths);\n    }\n\n    m_readMountPaths.reserve(allReadPaths.size());\n\n    uint64_t writeMask = FAN_CLOSE_WRITE;\n    uint64_t readMask = FAN_CLOSE_NOWRITE;\n    uint64_t readWriteMask = readMask | writeMask;\n\n    for(const auto & p : allWritePaths){\n        auto pathInReadIt = allReadPaths.find(p);\n        uint64_t m = writeMask;\n        if(pathInReadIt != allReadPaths.end()){\n            // path interesting for both, read and write\n            m = readWriteMask;\n            allReadPaths.erase(pathInReadIt);\n        }\n\n        if(fanotifyMarkWrapOnInit(m_fanFd, m, p) && m & readMask){\n            // once the specified number of read files was collected,\n            // the read paths shall be unregistered again. So store the paths.\n            m_readMountPaths.push_back(p);\n        }\n    }\n\n    // also add read paths not already marked above (along with write-paths).\n    for(const auto & p : allReadPaths){\n        if(fanotifyMarkWrapOnInit(m_fanFd, readMask, p)){\n            m_readMountPaths.push_back(p);\n        }\n    }\n\n    // ignore file events we generate ourselves\n    ignoreOwnPath(db_connection::getDatabaseDir().toUtf8());\n    ignoreOwnPath(StoredFiles::getReadFilesDir().toUtf8());\n    ignoreOwnPath(logger::logDir().toUtf8());\n    assert(m_feventHandler != nullptr);\n    ignoreOwnPath(m_feventHandler->getTmpDirPath().toUtf8());\n}\n\n\n\n/// Handle fanotify events.\n/// For a general introduction please see man fanotify.\nbool FanotifyController::handleEvents()\n{\n    struct fanotify_event_metadata *metadata;\n    struct fanotify_event_metadata buf[FANOTIFY_MAX_EVENT_COUNT];\n    ssize_t len;\n\n    // Loop while events can be read from fanotify file descriptor\n    while(true) {\n        // Read some events\n        len = read(m_fanFd, buf, sizeof(buf));\n        if (unlikely(len == -1 && errno != EAGAIN)) {\n            const auto preamble = qtr(\"read from fanotify file descriptor failed:\");\n            // 
maybe_todo: file a bug to the fanotify-devs? According to man 7 fanotify\n            // there should be no permission check, when the kernel reopens the file\n            // for fanotify...\n            // Furthermore it is unclear whether other events in the queue after a\n            // bad fd are gone as well.\n            switch (errno) {\n            case ENOENT: break; // a deleted file is not of interest anyway. ignore.\n            case EACCES:\n                logInfo << preamble\n                        << qtr(\"EACCES most likely occurred, because a not readable \"\n                               \"file was closed on a NFS-storage, or similar.\");\n                break;\n            default:\n                logWarning << preamble << \"(\" + QString::number(errno) + \") -\"\n                            << translation::strerror_l();\n                break;\n            }\n            return false;\n        }\n\n        // Check if end of available data reached\n        if (len <= 0) {\n            return true;\n        }\n        if(static_cast<size_t>(len) / sizeof(fanotify_event_metadata) < FANOTIFY_MAX_EVENT_COUNT / 8 ){\n            // Avoid reading too few events at a time (read-overhead). This sleep ensures\n            // the next read won't happen too soon.\n            usleep(1000*50);\n        }\n\n        logDebug << \"read\"\n                 << len << \"bytes (\"\n                 << static_cast<size_t>(len) / sizeof(fanotify_event_metadata) << \"events)\";\n\n        // Point to the first event in the buffer\n        metadata = buf;\n\n        // Loop over all events in the buffer\n        while (FAN_EVENT_OK(metadata, len)) {\n            // Check that run-time and compile-time structures match\n            if (unlikely(metadata->vers != FANOTIFY_METADATA_VERSION)) {\n                logCritical << qtr(\"Mismatch of fanotify metadata version - runtime: %1, \"\n                                   \"compiletime: %2. 
\"\n                                   \"No event-processing takes place. \"\n                                   \"Please recompile the application against the current \"\n                                   \"Kernel\").arg(metadata->vers, FANOTIFY_METADATA_VERSION);\n                // maybe_todo: unregister from all events?\n                return false;\n            }\n            // metadata->fd contains either FAN_NOFD, indicating a\n            // queue overflow, or a file descriptor (a nonnegative\n            // integer).\n            if (unlikely(metadata->fd < 0)) {\n                logWarning << \"fanotify: queue overflow\";\n                m_overflowCount++;\n            } else {\n                handleSingleEvent(*metadata);\n                ::close(metadata->fd);\n            }\n            // Advance to next event\n            metadata = FAN_EVENT_NEXT(metadata, len);\n        } // while (FAN_EVENT_OK(metadata, len))\n    } // while true\n}\n\n\n\nvoid FanotifyController::handleSingleEvent(\n        const struct fanotify_event_metadata& metadata){\n    if(unlikely(metadata.mask & FAN_Q_OVERFLOW)){\n        logWarning << \"fanotify: queue overflow\";\n        m_overflowCount++;\n    }\n #ifndef NDEBUG\n    {\n        auto st = os::fstat(metadata.fd);\n        std::string path;\n        try {\n            path = os::readlink(\"/proc/self/fd/\" + std::to_string(metadata.fd));\n        } catch (const os::ExcOs& ex) {\n            logDebug << ex.what();\n            path = \"UNKNOWN\";\n        }\n        auto action = fanotifyEventMaskToStr(metadata.mask);\n        logDebug << action << \"event-pid\" << metadata.pid << path\n                 << \"fd:\" << metadata.fd << \"uid: \" << st.st_uid\n                 << \" gid: \" << st.st_gid;\n    }\n#endif\n    if(metadata.mask & FAN_CLOSE_NOWRITE){\n        handleCloseRead_safe(metadata);\n    }\n    if (metadata.mask & FAN_CLOSE_WRITE) {\n        handleModCloseWrite_safe(metadata);\n    }\n}\n\n/// Handle 
a 'read'-event.\n/// If read 'script' files shall be stored, but not general read files,\n/// unregister from read events, as soon as the specified number of script\n/// files was collected.\nvoid FanotifyController::handleCloseRead_safe(const fanotify_event_metadata &metadata){\n    if(unlikely(m_ReadEventsUnregistered)){\n        // Do not edit: even if successfully unregistered,\n        // events in the fanotify event-queue may still need to be consumed.\n        return;\n    }\n    if(unlikely(! r_rCfg.enable && // never unregister, if general read files are logged\n         r_scriptCfg.enable &&\n            m_feventHandler->fileEvents().rStoredFilesCount() >=\n            r_scriptCfg.maxCountOfFiles)) {\n        unregisterAllReadPaths();\n        m_ReadEventsUnregistered = true;\n        return;\n    }\n\n    try {\n        m_feventHandler->handleCloseRead(metadata.fd);\n        // The count of cached read (script-) files might have been incremented,\n        // so we might be done with read events. For the sake\n        // of code-shortness only check that the *next* time we consume a read event.\n    } catch (const std::exception & e) {\n        logCritical << e.what();\n    }\n}\n\n\nvoid FanotifyController::handleModCloseWrite_safe(\n        const fanotify_event_metadata & metadata){\n    try {\n        m_feventHandler->handleCloseWrite( metadata.fd );\n    } catch (const std::exception & e) {\n        logCritical << e.what();\n    }\n}\n\n/// unregister read events for all previously marked paths.\nvoid FanotifyController::unregisterAllReadPaths()\n{\n    logDebug << \"enough read script-files collected. 
Unregistering...\";\n    for(const auto& p : m_readMountPaths){\n        if (fanotify_mark(m_fanFd, FAN_MARK_REMOVE | FAN_MARK_MOUNT,\n                          FAN_CLOSE_NOWRITE, AT_FDCWD,\n                          p.c_str()) == -1) {\n            logInfo << \"fanotify_mark: failed to remove read-path \" << p\n                    <<\": \" << translation::strerror_l();\n        }\n    }\n\n}\n\n\nvoid FanotifyController::ignoreOwnPath(const QByteArray& p){\n    if (fanotify_mark(m_fanFd,\n                      FAN_MARK_ADD | FAN_MARK_IGNORED_MASK |\n                      FAN_MARK_IGNORED_SURV_MODIFY | FAN_MARK_ONLYDIR,\n                      FAN_ALL_EVENTS,\n                      -1,\n                      p.constData()) == -1){\n        // should never happen...\n        logCritical << \"fanotify_mark: failed to ignore our own path: \"\n                    << translation::strerror_l(errno);\n    }\n}\n\nuint FanotifyController::getOverflowCount() const\n{\n    return m_overflowCount;\n}\n\n\n"
  },
  {
    "path": "src/shournal-run-fanotify/fanotify_controller.h",
    "content": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"fileeventhandler.h\"\n#include \"util.h\"\n\nstruct fanotify_event_metadata;\n\nclass FanotifyController\n{\npublic:\n    FanotifyController();\n    ~FanotifyController();\n    void setFileEventHandler(std::shared_ptr<FileEventHandler>&);\n    void setupPaths();\n\n    bool handleEvents();\n\n    int fanFd() const;\n\n    int getFanotifyMaxEventCount() const;\n    uint getOverflowCount() const;\n\npublic:\n    Q_DISABLE_COPY(FanotifyController)\n    DISABLE_MOVE(FanotifyController)\n\n\nprivate:\n\n    void handleSingleEvent(const fanotify_event_metadata &metadata);\n    void handleCloseRead_safe(const fanotify_event_metadata &metadata);\n    void handleModCloseWrite_safe(const fanotify_event_metadata &metadata);\n    void unregisterAllReadPaths();\n    void ignoreOwnPath(const QByteArray& p);\n\n    std::shared_ptr<FileEventHandler> m_feventHandler;\n\n    uint m_overflowCount{0};\n    int m_fanFd;\n    bool m_markLimitReached;\n    bool m_ReadEventsUnregistered;\n    std::vector<std::string> m_readMountPaths; // all mount paths initially marked for read-events\n    const Settings::WriteFileSettings& r_wCfg;\n    const Settings::ReadFileSettings& r_rCfg;\n    const Settings::ScriptFileSettings& r_scriptCfg;\n\n};\n\n"
  },
  {
    "path": "src/shournal-run-fanotify/filewatcher_fan.cpp",
    "content": "\n#include <cassert>\n#include <sys/types.h>\n#include <csignal>\n#include <poll.h>\n#include <sys/socket.h>\n#include <sys/prctl.h>\n#include <sys/capability.h>\n#include <syscall.h>\n#include <linux/securebits.h>\n\n#include <QHostInfo>\n#include <QDir>\n\n#include <thread>\n#include <future>\n\n#include \"compat.h\"\n#include \"filewatcher_fan.h\"\n#include \"fanotify_controller.h\"\n#include \"mount_controller.h\"\n#include \"os.h\"\n#include \"osutil.h\"\n#include \"oscaps.h\"\n#include \"cleanupresource.h\"\n#include \"fdcommunication.h\"\n#include \"util.h\"\n#include \"logger.h\"\n#include \"subprocess.h\"\n#include \"excos.h\"\n#include \"db_globals.h\"\n#include \"db_connection.h\"\n#include \"db_controller.h\"\n#include \"commandinfo.h\"\n#include \"translation.h\"\n#include \"subprocess.h\"\n#include \"app.h\"\n#include \"pathtree.h\"\n#include \"fileeventhandler.h\"\n#include \"orig_mountspace_process.h\"\n#include \"cpp_exit.h\"\n#include \"qfilethrow.h\"\n#include \"storedfiles.h\"\n#include \"sys_ioprio.h\"\n#include \"qoutstream.h\"\n#include \"conversions.h\"\n#include \"socket_message.h\"\n#include \"shournal_run_common.h\"\n#include \"stdiocpp.h\"\n#include \"kernel/shournalk_user.h\"\n\nusing socket_message::E_SocketMsg;\nusing SocketMessages = fdcommunication::SocketCommunication::Messages;\nusing subprocess::Subprocess;\nusing osutil::closeVerbose;\n\nconst int PRIO_FANOTIFY_POLL = 2;\nconst int PRIO_DATABASE_FLUSH = 10;\n\n\nstatic void unshareOrDie(){\n    try {\n        os::unshare( CLONE_NEWNS);\n    } catch (const os::ExcOs& e) {\n        logCritical << e.what();\n        if(os::geteuid() != 0){\n            logCritical << qtr(\"Note that the effective userid is not 0 (root), so most probably %1 \"\n                               \"does not have the setuid-bit set. 
As root execute:\\n\"\n                               \"chown root %1 && chmod u+s %1\").arg(app::CURRENT_NAME);\n        }\n        cpp_exit(1);\n    }\n}\n\n/// Other applications unsharing their mount-namespace might rely on the\n/// fact that they cannot be joined (except from root). Therefore shournal\n/// allows only joining of processes whose (effective) gid matches\n/// below group.\nstatic gid_t findMsenterGidOrDie(){\n    auto* groupInfo = getgrnam(app::MSENTER_ONLY_GROUP);\n    if(groupInfo == nullptr){\n        logCritical << qtr(\"group %1 does not exist on your \"\n                           \"system but is required. Please add it:\\n\"\n                           \"groupadd %1\").arg(app::MSENTER_ONLY_GROUP);\n        cpp_exit(1);\n    }\n    return groupInfo->gr_gid;\n}\n\n\n/// The childprocess's mount-namespace can be joined by shournal-run (msenter).\n/// It has a group-id which should be used solely for this purpose which\n/// serves as a permission check, so shournal-run cannot be used to join\n/// processes which were not 'created' by it.\nFileWatcher::MsenterChildReturnValue FileWatcher::setupMsenterTargetChildProcess(){\n    assert(os::geteuid() == os::getuid());\n    os::seteuid(0);\n\n    // set ids before fork, so parent does not need to wait for child\n    // (msenter uid and gid permission check!)\n    os::setegid(m_msenterGid);\n    os::seteuid(m_realUid);\n\n    auto pipe_ = os::pipe();\n    auto msenterPid = os::fork();\n\n    if(msenterPid != 0){\n        // parent\n        os::seteuid(0);\n        os::setegid(os::getgid());\n        os::seteuid(m_realUid);\n        os::close(pipe_[0]);\n        return {msenterPid, pipe_[1]};\n    }\n    // child\n    if(m_sockFd != -1){\n        // the socket is used to wait for other processes, not this one, so:\n        os::close(m_sockFd);\n    }\n    os::close(pipe_[1]);\n    char c;\n    // wait until parent-process closes its write-end\n    os::read(pipe_[0], &c, 1);\n    
exit(0);\n}\n\nFileWatcher::FileWatcher() :\n    m_sockFd(-1),\n    m_msenterGid(std::numeric_limits<gid_t>::max()),\n    m_commandArgc(0),\n    m_commandFilename(nullptr),\n    m_commandArgv(nullptr),\n    m_commandEnvp(environ),\n    m_realUid(os::getuid()),\n    m_storeToDatabase(true)\n{}\n\nvoid FileWatcher::setupShellLogger()\n{\n    m_shellLogger.setFullpath(logger::logDir() + \"/log_\" + app::SHOURNAL + \"_shell_integration\");\n    m_shellLogger.setup();\n}\n\nstd::shared_ptr<FileEventHandler> FileWatcher::createFileEventHandler()\n{\n    // fevent-handler sets up a temp dir in constructor - use user privileges.\n    // ld.so clears TMPDIR from the environment of suid-binaries for security reasons,\n    // so set it temporarily:\n    assert(os::geteuid() == os::getuid());\n    static const char* TMPDIR = \"TMPDIR\";\n\n    const char* oldTmp = getenv(TMPDIR);\n    auto resetTmpDir = finally([&oldTmp] {\n        if(oldTmp == nullptr)\n            unsetenv(TMPDIR);\n        else\n            os::setenv(TMPDIR, oldTmp);\n\n    }, false);\n\n    if(! 
m_tmpDir.isEmpty()){\n        os::setenv(TMPDIR, m_tmpDir.constData());\n        resetTmpDir.setEnabled(true);\n    }\n    return std::make_shared<FileEventHandler>();\n}\n\n\n/// Unshare the mount-namespace and mark the interesting mounts with fanotify according\n/// to the paths specified in settings.\n/// Then either start a new process (passed argv) or wait until the passed socket is closed.\n/// In this case, we are in the shell observation mode.\n/// To allow other processes to join (--msenter), we fork off a child process with a\n/// special group id, which waits for us to finish.\n/// Process fanotify events until the observed process finishes (first case) or until\n/// all other instances of the passed socket are closed by the observed processes.\n/// See also code in directory 'shell-integration'.\nvoid FileWatcher::run()\n{\n    m_msenterGid = findMsenterGidOrDie();\n    orig_mountspace_process::setupIfNotExist();\n    m_fEventHandler = createFileEventHandler();\n\n    os::seteuid(0);\n    unshareOrDie();\n    auto fanotifyCtrl = FanotifyController_ptr(new FanotifyController);\n    fanotifyCtrl->setFileEventHandler(m_fEventHandler);\n    fanotifyCtrl->setupPaths();\n\n    // We process events (filedescriptor-receive- and fanotify-events) with the\n    // effective uid of the caller, because read events for files, for which\n    // only the owner has read permission, usually fail for\n    // root in case of NFS-storages. 
See also man 5 exports, look for 'root squashing'.\n    os::seteuid(m_realUid);\n\n\n    // maybe_todo: change scheduler?\n    // struct sched_param sched{};\n    // sched.sched_priority = 0;\n    // if(sched_setscheduler(getpid(), SCHED_BATCH | SCHED_RESET_ON_FORK, &sched) == -1){\n    //     logInfo << __FILE__ << \"sched_setscheduler failed\" << translation::strerror_l(errno) ;\n    // }\n\n    CommandInfo cmdInfo =  CommandInfo::fromLocalEnv();\n    cmdInfo.sessionInfo.uuid = m_shellSessionUUID;\n\n    int ret = 1;\n    m_sockCom.setReceiveBufferSize(RECEIVE_BUF_SIZE);\n    E_SocketMsg pollResult;\n    if(m_commandArgc != 0){\n        if(m_commandFilename != nullptr){\n            cmdInfo.text += QString(m_commandFilename) + \" \";\n        }\n        cmdInfo.text += argvToQStr(m_commandArgc, m_commandArgv);\n        auto sockPair = os::socketpair(PF_UNIX, SOCK_STREAM | SOCK_CLOEXEC );\n        m_sockCom.setSockFd(sockPair[0]);\n\n        Subprocess proc;\n        proc.setAsRealUser(true);\n        proc.setEnviron(m_commandEnvp);\n        cmdInfo.startTime = QDateTime::currentDateTime();\n        // TOODO: evtl. allow to configure proc to not close one of our sockets,\n        // to wait on grandchildren.\n        // Remove SOCK_CLOEXEC for one of them in that case\n        const char* cmdFilename = (m_commandFilename == nullptr) ? 
m_commandArgv[0]\n                                                                 : m_commandFilename;\n        proc.call(cmdFilename, m_commandArgv);\n        // *Must* be called after fork (resource limits, etc.)\n        std::future<E_SocketMsg> thread = std::async(&FileWatcher::fan_pollUntilStopped, this,\n                                                     std::ref(cmdInfo),\n                                                     std::ref(fanotifyCtrl));\n        try {\n            cmdInfo.returnVal = proc.waitFinish();\n        } catch (const os::ExcProcessExitNotNormal& ex) {\n            // return typical shell cpp_exit code\n            cmdInfo.returnVal = 128 + ex.status();\n        }\n        ret = cmdInfo.returnVal;\n        // that should stop the polling event loop:\n        os::close(sockPair[1]);\n        thread.wait();\n        os::close(sockPair[0]);\n        pollResult = thread.get();\n    } else if(m_sockFd != -1){\n        MsenterChildReturnValue msenterChildRet = setupMsenterTargetChildProcess();\n        auto closeMsenterWritePipe = finally([&msenterChildRet] {\n            os::close(msenterChildRet.pipeWriteEnd);\n            os::waitpid(msenterChildRet.pid);\n        });\n        m_sockCom.setSockFd(m_sockFd);\n        // should be overwritten later, null-constraint in db...\n        cmdInfo.startTime = QDateTime::currentDateTime();\n        setupShellLogger();\n        int rootDirFd = os::open(\"/\", O_RDONLY | O_DIRECTORY);\n        auto closeRootDir = finally([&rootDirFd] { closeVerbose(rootDirFd);} );\n        SocketMessages sockMesgs;\n        m_sockCom.sendMsg({int(E_SocketMsg::SETUP_DONE),\n                           qBytesFromVar(msenterChildRet.pid), rootDirFd});\n\n        pollResult = fan_pollUntilStopped(cmdInfo, fanotifyCtrl);\n        ret = 0;\n    } else {\n        pollResult = E_SocketMsg::ENUM_END;\n        assert(false);\n    }\n\n    cmdInfo.endTime = QDateTime::currentDateTime();\n    logDebug << \"polling finished - 
about to cleanup and exit\";\n\n    auto fanOveflowCount = fanotifyCtrl->getOverflowCount();\n    fanotifyCtrl.reset();\n\n    switch (pollResult) {\n    case E_SocketMsg::EMPTY: break; // Normal case\n    case E_SocketMsg::ENUM_END:\n        logCritical << qtr(\"Because an error occurred, processing of \"\n                           \"fanotify/socket-events was \"\n                            \"stopped\");\n        cpp_exit(ret);\n    default:\n        logWarning << \"unhandled case for pollResult: \" << int(pollResult);\n        break;\n    }\n\n    QStringList missingFields;\n    if(cmdInfo.text.isEmpty()){\n        // An empty command text should only occur, if the observed shell-session\n        // exits. Discard this command.\n        logDebug << \"command-text is empty, \"\n                    \"not pushing to database...\";\n        cpp_exit(ret);\n    }\n    if(cmdInfo.returnVal == CommandInfo::INVALID_RETURN_VAL){\n        missingFields += qtr(\"return value\");\n    }\n    if(! missingFields.isEmpty()){\n        logDebug << \"The following fields are empty: \" << missingFields.join(\", \");\n    }\n\n    if(m_printSummary){\n        auto & fevents = m_fEventHandler->fileEvents();\n        QString overflowEvents = (fanOveflowCount)\n                ? 
\">= \" + QString::number(fanOveflowCount)\n                : \"0\";\n        QErr() << qtr(\"=== %1 summary ===\\n\"\n                      \"number of write-events: %2\\n\"\n                      \"number of read-events: %3\\n\"\n                      \"number of lost events: %4\\n\"\n                      \"number of stored read files: %5\\n\"\n                      \"size of tmp-file: %6\\n\")\n                  .arg(app::CURRENT_NAME)\n                  .arg(fevents.wEventCount())\n                  .arg(fevents.rEventCount())\n                  .arg(overflowEvents)\n                  .arg(fevents.rStoredFilesCount())\n                  .arg(Conversions().bytesToHuman(\n                           os::fstat(fileno(fevents.file())).st_size));\n    }\n\n    if(m_storeToDatabase){\n        flushToDisk(cmdInfo);\n    }\n    cpp_exit(ret);\n}\n\nvoid FileWatcher::setShellSessionUUID(const QByteArray &shellSessionUUID)\n{\n    m_shellSessionUUID = shellSessionUUID;\n}\n\nvoid FileWatcher::setArgv(char **argv, int argc)\n{\n    m_commandArgv = argv;\n    m_commandArgc = argc;\n}\n\nvoid FileWatcher::setCommandEnvp(char **commandEnv)\n{\n    m_commandEnvp = commandEnv;\n}\n\nvoid FileWatcher::setSockFd(int sockFd)\n{\n    m_sockFd = sockFd;\n}\n\nint FileWatcher::sockFd() const\n{\n    return m_sockFd;\n}\n\nvoid FileWatcher::setStoreToDatabase(bool storeToDatabase)\n{\n    m_storeToDatabase = storeToDatabase;\n}\n\nvoid FileWatcher::setPrintSummary(bool printSummary)\n{\n    m_printSummary = printSummary;\n}\n\nvoid FileWatcher::setTmpDir(const QByteArray &tmpDir)\n{\n    m_tmpDir = tmpDir;\n}\n\n\nvoid FileWatcher::setCommandFilename(char *commandFilename)\n{\n    m_commandFilename = commandFilename;\n}\n\n\n///  @return E_SocketMsg::EMPTY, if processing shall be stopped\nE_SocketMsg FileWatcher::processSocketEvent( CommandInfo& cmdInfo ){\n    m_sockCom.receiveMessages(&m_sockMessages);\n    E_SocketMsg returnMsg = E_SocketMsg::ENUM_END;\n    for(auto & msg : 
m_sockMessages){\n        if(msg.bytes.size() > RECEIVE_BUF_SIZE - 1024*10){\n            logWarning << \"unusual large message received\";\n        }\n        if(msg.msgId == -1){\n            return E_SocketMsg::EMPTY;\n        }\n        assert(msg.msgId >=0 && msg.msgId < int(E_SocketMsg::ENUM_END));\n\n        returnMsg = E_SocketMsg(msg.msgId);\n\n        logDebug << \"received message:\"\n                 << socket_message::socketMsgToStr(E_SocketMsg(msg.msgId))\n                 << msg.bytes;\n        switch (E_SocketMsg(msg.msgId)) {\n        case E_SocketMsg::COMMAND: {\n            cmdInfo.text = msg.bytes;\n            break;\n        }\n        case E_SocketMsg::CMD_START_DATETIME: {\n            cmdInfo.startTime = QDateTime::fromString(QString::fromUtf8(msg.bytes),\n                                                      Conversions::dateIsoFormatWithMilliseconds());\n            assert(! cmdInfo.startTime.isNull());\n            break;\n        }\n        case E_SocketMsg::RETURN_VALUE: {\n            cmdInfo.returnVal = varFromQBytes<qint32>(msg.bytes);\n            break;\n        }\n        case E_SocketMsg::LOG_MESSAGE:\n            m_shellLogger.stream() << msg.bytes << Qt::endl;\n            break;\n\n        case E_SocketMsg::CLEAR_EVENTS:\n            m_fEventHandler->clearEvents();\n            // maybe_todo: also clear fanotify overflow events\n            // (which occurred very unlikely in this case)\n            cmdInfo.startTime = QDateTime::currentDateTime();\n            break;\n        default: {\n            // application bug?\n            returnMsg = E_SocketMsg::EMPTY;\n            logCritical << qtr(\"invalid message received - : %1\").arg(int(msg.msgId));\n            break;\n        }\n        }\n    }\n    assert(returnMsg != E_SocketMsg::ENUM_END);\n    return returnMsg;\n}\n\nvoid FileWatcher::flushToDisk(CommandInfo& cmdInfo){\n    assert(os::getegid() == os::getgid());\n    assert(os::geteuid() == os::getuid());\n\n    // 
Do not disturb other processes while we flush events to database\n    os::setpriority(PRIO_PROCESS, 0, PRIO_DATABASE_FLUSH);\n    try {\n        cmdInfo.idInDb = db_controller::addCommand(cmdInfo);\n        StoredFiles::mkpath();\n        stdiocpp::fseek(m_fEventHandler->fileEvents().file(), 0, SEEK_SET);\n        db_controller::addFileEvents(cmdInfo, m_fEventHandler->fileEvents());\n    } catch (std::exception& e) {\n        // May happen, e.g. if we run out of disk space...\n        logCritical << qtr(\"Failed to store (some) file-events to disk: %1\").arg(e.what());\n    }\n}\n\n/// @return: EMPTY, if stopped regularly\n///          ENUM_END in case of an error\nE_SocketMsg FileWatcher::fan_pollUntilStopped(CommandInfo& cmdInfo,\n                             FanotifyController_ptr& fanotifyCtrl){\n\n    // To allow for more fanotify-events read at a time, increase\n    // RLIMIT_NOFILE\n    struct rlimit rlim{};\n    getrlimit(RLIMIT_NOFILE, &rlim);\n    const auto NO_FILE = fanotifyCtrl->getFanotifyMaxEventCount();\n    rlim.rlim_cur = NO_FILE;\n    if(setrlimit(RLIMIT_NOFILE, &rlim) == -1){\n        logInfo << qtr(\"Failed to set number of open files to %1 - %2\")\n                   .arg(NO_FILE)\n                   .arg(translation::strerror_l(errno));\n    }\n\n    // At least on centos 7 with Kernel 3.10 CAP_SYS_PTRACE is required, otherwise\n    // EACCES occurs on readlink of the received file descriptors\n    // Warning: changing euid from 0 to nonzero resets the effective capabilities,\n    // so don't do that until processing finished.\n    auto caps = os::Capabilites::fromProc();\n    const os::Capabilites::CapFlags eventProcessingCaps { CAP_SYS_PTRACE, CAP_SYS_NICE };\n    caps->setFlags(CAP_EFFECTIVE, { eventProcessingCaps });\n    auto resetEventProcessingCaps = finally([&caps, &eventProcessingCaps] {\n        caps->clearFlags(CAP_EFFECTIVE, eventProcessingCaps);\n    });\n\n    os::setpriority(PRIO_PROCESS, 0, PRIO_FANOTIFY_POLL);\n    auto 
resetPriority = finally([] {\n        os::setpriority(PRIO_PROCESS, 0, 0);\n    });\n\n    if(syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,\n               IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 6))){\n        logWarning << \"Failed to set io-priority:\" << strerror(errno);\n    }\n\n    int poll_num;\n    const nfds_t nfds = 2;\n    struct pollfd fds[nfds];\n\n    fds[0].fd = m_sockCom.sockFd();\n    fds[0].events = POLLIN;\n\n    // Fanotify input\n    fds[1].fd = fanotifyCtrl->fanFd();\n    fds[1].events = POLLIN;\n    while (true) {\n        // cleanly cpp_exit poll:\n        // poll for two file descriptors: the fanotify descriptor and\n        // another one, which receives an cpp_exit-message).\n        poll_num = poll(fds, nfds, -1);\n        if (poll_num == -1) {\n            if (errno == EINTR){     // Interrupted by a signal\n                continue;            // Restart poll()\n            }\n            logCritical << qtr(\"poll failed (%1) - %2\").arg(errno)\n                           .arg(translation::strerror_l());\n            return E_SocketMsg::ENUM_END;\n        }\n        // 0 only on timeout, which is infinite\n        assert(poll_num != 0);\n\n        // Important: first handle fanotify events, then check the socket if we are done.\n        // Otherwise final fanotify-events might get lost!\n        if (fds[1].revents & POLLIN) {\n            // Fanotify events are available\n            logDebug << \"new fanotify events...\";\n            fanotifyCtrl->handleEvents();\n        }\n        if (fds[0].revents & POLLIN) {\n            if(processSocketEvent(cmdInfo) == E_SocketMsg::EMPTY){\n                return E_SocketMsg::EMPTY;\n            }\n        }\n    }\n\n}\n\n\n\n\n\n\n"
  },
  {
    "path": "src/shournal-run-fanotify/filewatcher_fan.h",
    "content": "#pragma once\n\n#include \"logger.h\"\n#include \"fileeventhandler.h\"\n#include \"fanotify_controller.h\"\n#include \"socket_message.h\"\n#include \"fdcommunication.h\"\n\nclass FanotifyController;\nstruct CommandInfo;\n\nclass FileWatcher {\npublic:\n    FileWatcher();\n\n    void setupShellLogger();\n\n    [[noreturn]]\n    void run();\n\n    void setShellSessionUUID(const QByteArray &shellSessionUUID);\n\n    void setArgv(char** argv, int argc);\n\n    void setCommandEnvp(char **commandEnv);\n    void setCommandFilename(char *commandFilename);\n\n    void setSockFd(int sockFd);\n\n    int sockFd() const;\n\n\n    void setStoreToDatabase(bool storeToDatabase);\n    void setPrintSummary(bool printSummary);\n    void setTmpDir(const QByteArray &tmpDir);\n\nprivate:\n    struct MsenterChildReturnValue {\n        MsenterChildReturnValue(pid_t p, int pipeWrite) :\n            pid(p), pipeWriteEnd(pipeWrite){}\n        pid_t pid;\n        int pipeWriteEnd;\n    };\n    typedef std::unique_ptr<FanotifyController> FanotifyController_ptr;\n\n    static const int RECEIVE_BUF_SIZE = 1024*1024;\n\n    int m_sockFd;\n    logger::LogRotate m_shellLogger;\n    std::shared_ptr<FileEventHandler> m_fEventHandler;\n    QByteArray m_tmpDir;\n    gid_t m_msenterGid;\n    fdcommunication::SocketCommunication m_sockCom;\n    QByteArray m_shellSessionUUID;\n    int m_commandArgc;\n    char* m_commandFilename;\n    char **m_commandArgv;\n    char ** m_commandEnvp;\n    uid_t m_realUid;\n    bool m_printSummary{};\n    fdcommunication::SocketCommunication::Messages m_sockMessages;\n    bool m_storeToDatabase;\n\n    std::shared_ptr<FileEventHandler> createFileEventHandler();\n    MsenterChildReturnValue setupMsenterTargetChildProcess();\n    socket_message::E_SocketMsg fan_pollUntilStopped(CommandInfo& cmdInfo,\n                                 FanotifyController_ptr& fanotifyCtrl);\n    socket_message::E_SocketMsg processSocketEvent( CommandInfo& cmdInfo );\n    void 
flushToDisk(CommandInfo& cmdInfo);\n\n\n};\n\n\n"
  },
  {
    "path": "src/shournal-run-fanotify/mount_controller.cpp",
    "content": "#include <sys/mount.h>\n#include <mntent.h>\n#include <unistd.h>\n#include <deque>\n#include <cstring>\n#include <iostream>\n#include <sys/stat.h>\n#include <regex>\n\n#include <QDebug>\n#include <QStandardPaths>\n\n#include \"mount_controller.h\"\n#include \"util.h\"\n#include \"settings.h\"\n#include \"excos.h\"\n#include \"os.h\"\n#include \"pathtree.h\"\n#include \"logger.h\"\n#include \"cleanupresource.h\"\n\n\n/// Return all mountpaths from /proc/self/mounts except the\n/// ones marked to be ignored (settings).\nstd::shared_ptr<PathTree> mountController::generatelMountTree(){\n    auto & ignoreMountPaths = Settings::instance().getMountIgnorePaths();\n    PathTree ignoreMountTree;\n    for(const auto & path : ignoreMountPaths){\n        ignoreMountTree.insert(path);\n    }\n\n    // iterate over all of our mounts\n    FILE* mounts = setmntent (\"/proc/self/mounts\", \"r\");\n    if (mounts == nullptr) {\n        throw ExcMountCtrl(\"setmntent /proc/self/mounts failed\");\n    }\n    auto closeLater = finally([&mounts] { endmntent (mounts); });\n\n    // Determine which submounts shall be ignored and\n    // collect the others\n    auto mountTree = std::make_shared<PathTree>();\n    struct mntent* mnt_;\n    while ((mnt_ = getmntent (mounts)) != nullptr) {\n        const StrLight mntDir(mnt_->mnt_dir);\n        if(ignoreMountTree.isSubPath(mntDir, true)){\n            logDebug << \"ignoring mountpath\" << mntDir.c_str();\n            continue;\n        }\n        mountTree->insert(mntDir);\n    }\n    return mountTree;\n}\n\n\n\n\n\n\n"
  },
  {
    "path": "src/shournal-run-fanotify/mount_controller.h",
    "content": "#pragma once\n\n#include <string>\n#include <unordered_set>\n#include <memory>\n\n\n#include \"exccommon.h\"\n#include \"settings.h\"\n\nclass ExcMountCtrl : public QExcCommon\n{\npublic:\n    using QExcCommon::QExcCommon;\n};\n\n\nnamespace mountController {\n    std::shared_ptr<PathTree> generatelMountTree();\n\n}\n\n\n"
  },
  {
    "path": "src/shournal-run-fanotify/msenter.cpp",
    "content": "/* Allow joining a mount-namespace created by shournal.\n * The most elegant solution would have been to pass the mnt-fd to the observed shell\n * process and let that one call setns before each command sequence. That is\n * however not allowed. TODO: suggest the Kernel devs, to skip permission checks\n * for setns if the respective fd in /proc/$pid/ns was opened by root.\n * Instead we call setns before exec, which we can, because being a setuid-program.\n * Thus we should perform some permission checks, to only allow joining mount-namespaces created\n * by shournal.\n * The target pid's user-id must be the same as the caller, its group *must* be\n * app::MSENTER_ONLY_GROUP.\n * Reenter the working directory (checked race condition, because using the dirfd does not work).\n * */\n\n#include \"msenter.h\"\n\n#include <fcntl.h>\n#include <error.h>\n#include <climits>\n#include <csignal>\n#include <cassert>\n\n#include <QDir>\n#include \"os.h\"\n#include \"excos.h\"\n#include \"qoutstream.h\"\n#include \"util.h\"\n#include \"translation.h\"\n#include \"osutil.h\"\n#include \"pidcontrol.h\"\n#include \"cleanupresource.h\"\n#include \"app.h\"\n#include \"logger.h\"\n#include \"fdentries.h\"\n\nusing osutil::closeVerbose;\n\n/// @overload\nvoid msenter::run(pid_t targetPid, const char* filename, char *commandArgv[], char **envp)\n{\n    int targetprocDirFd = os::open(\"/proc/\" + std::to_string(targetPid), O_DIRECTORY);\n    auto closeFdLater = finally([&targetprocDirFd] {  closeVerbose(targetprocDirFd);  });\n    run(filename, commandArgv, envp, targetprocDirFd);\n}\n\n/// @param targetprocDirFd: an open directory descriptor of the target process.\n/// The caller is responsible, for closing it, if desired\nvoid msenter::run(const char* filename, char *commandArgv[], char **envp, int targetprocDirFd)\n{\n    struct stat targetPidSt =  os::fstat(targetprocDirFd);\n\n    auto* allowedGroupInfo = getgrnam(app::MSENTER_ONLY_GROUP);\n    if(allowedGroupInfo 
== nullptr){\n        logCritical << qtr(\"group %1 does not exist on your \"\n                       \"system but is required. Please add it.\").arg(app::MSENTER_ONLY_GROUP);\n        exit(1);\n    }\n\n    auto realUid = os::getuid();\n    if (realUid != targetPidSt.st_uid ) {\n        logCritical << qtr(\"Target process belongs to a \"\n                       \"different user.\");\n        exit(1);\n    }\n\n    if( allowedGroupInfo->gr_gid != targetPidSt.st_gid){\n        logCritical << qtr(\"The group of the target process is not '%1'\")\n                       .arg(app::MSENTER_ONLY_GROUP);\n        exit(1);\n    }\n    bool setnsSuccess = false;\n    bool openOrigCwdSuccess = false;\n    try {\n        // Remember the old working dir (setns changes it). Note that it is\n        // not possible to fchdir back to oldWdFd, because doing\n        // so leads also back to the original mountspace...\n        // Opening the dir as *real* user is essential on NFS -> permissions.\n        os::seteuid(realUid);\n        const int oldWdFd = os::open(\".\", O_DIRECTORY);\n        openOrigCwdSuccess = true;\n\n        os::seteuid(os::getsuid());\n\n        int mntFd = os::openat(targetprocDirFd , \"ns/mnt\" , O_RDONLY);\n        os::setns(mntFd, CLONE_NEWNS);\n        setnsSuccess = true;\n        os::close(mntFd);\n\n        // Drop root privileges, irrevocable.\n        os::setuid(realUid);\n\n        try {\n            const int newWdFd = osutil::reopenFdByPath( oldWdFd, O_DIRECTORY, true, false);\n            auto closeNewWdLater = finally([&newWdFd] {  closeVerbose(newWdFd);  });\n            // reenter the working directory in new mount namespace which is surely the same\n            // filesystem entry\n            os::fchdir(newWdFd);\n        } catch (const os::ExcOs& e) {\n            // Should almost never happen. 
In that case\n            // enter the working dir in the original mount-namespace.\n            // File-events, referring to relative paths might then be lost.\n            logWarning << qtr(\"Failed to enter working directory within the new mount-namespace. \"\n                              \"Entering the original one instead. Some file-events might \"\n                              \"be lost. Reason: %1\").arg(e.what());\n            os::fchdir(oldWdFd);\n        }\n        os::close(oldWdFd);\n\n    } catch (const os::ExcOs & ex) {\n        logCritical << ex.what();        \n        if(! openOrigCwdSuccess){\n            QString moreInfo;\n            if( ex.errorNumber() == ESTALE){\n                moreInfo = qtr(\"Most probably it was deleted. \");\n            }\n            logCritical << qtr(\"Failed to open the working directory. %1\").arg(moreInfo);\n        }\n        if(! setnsSuccess){\n            // most probably setns failed, because we are not suid. Since this program is execve'd\n            // itself, above error message might not be visible for the user. So be gentle and\n            // do not exit here.\n            logCritical << qtr(\"Entering the mount-namespace at %1 failed, file events are not \"\n                               \"captured...\")\n                           .arg(osutil::findPathOfFd<QByteArray>(targetprocDirFd).data());\n\n        }\n        os::setuid(os::getuid());\n    }\n\n    execvpe(filename, commandArgv, envp);\n    int err = errno;\n    // Only get here on error.\n    // Failed to launch the executable - print error and mimic shell-return-codes.\n    translation::init();\n    QErr() << filename << \": \" << translation::strerror_l(err) << \"\\n\";\n    // In bash and zsh at least the following special cases exist:\n    switch (err) {\n    case EACCES: exit(126);\n    case ENOENT: exit(127);\n    // TODO: 128 + errno?\n    default: exit(1);\n    }\n\n}\n"
  },
  {
    "path": "src/shournal-run-fanotify/msenter.h",
    "content": "#pragma once\n\n#include <unistd.h>\n\nnamespace msenter {\n\n[[noreturn]]\nvoid run(pid_t targetPid, const char *filename, char *commandArgv[], char **envp);\n[[noreturn]]\nvoid run(const char* filename, char *commandArgv[], char **envp,\n         int targetprocDirFd);\n\n}\n\n\n\n"
  },
  {
    "path": "src/shournal-run-fanotify/orig_mountspace_process.cpp",
    "content": "/*\n * To allow for leaving an observed mount namespace, which is e.g. helpful,\n * if another *interactive and observed* shell is launched within an observed\n * mount-namespace, the first time shournal starts (for a gien user)\n * a seperate process is started,\n * whose only task is to wait forever (until signaled). The process creates\n * a PID-file at the temporary dir, belongs to the real user and has the msenter group.\n * */\n\n#include <fcntl.h>\n\n#include <QStandardPaths>\n#include <QFileInfo>\n#include <QLockFile>\n\n#include \"orig_mountspace_process.h\"\n#include \"os.h\"\n#include \"app.h\"\n#include \"qoutstream.h\"\n#include \"logger.h\"\n#include \"msenter.h\"\n#include \"cleanupresource.h\"\n#include \"osutil.h\"\n#include \"fdentries.h\"\n\nusing osutil::closeVerbose;\n\nnamespace  {\n\nconst char* LOCK = \"_LOCK\";\n\nQString userPidPath(){\n    return pathJoinFilename(QStandardPaths::writableLocation(QStandardPaths::TempLocation),\n            QString(app::SHOURNAL) + \"-orig-mountnamespace-\"\n            + os::getUserName<QString>()) ;\n\n}\n\n/// The grandchild sets up a never ending process with the real userid\n/// and the msenter-groupid\n[[noreturn]]\nvoid setupIfNotExistAsChild(const QString& pidPath) {\n\n    auto* allowedGroupInfo = getgrnam(app::MSENTER_ONLY_GROUP);\n    if(allowedGroupInfo == nullptr){\n        logCritical << qtr(\"group %1 does not exist on your \"\n                           \"system but is required to setup the original \"\n                           \"mountnamespace-process. Please add it.\").arg(app::MSENTER_ONLY_GROUP);\n        exit(1);\n    }\n\n    os::seteuid(0);\n    os::setgid(allowedGroupInfo->gr_gid);\n    os::setuid(os::getuid());\n\n    QLockFile lockFile(pidPath + LOCK);\n    if(! 
lockFile.lock()){\n         logCritical << qtr(\"failed to obtain lock on %1\").arg(pidPath + LOCK);\n    }\n    if(QFileInfo::exists(pidPath)){\n        // file was created meanwhile\n        exit(0);\n    }\n    logDebug << \"creating new pid-file for the original mount-namespace\";\n\n    QFile pidFile(pidPath);\n    {\n        if(! pidFile.open(QFile::OpenModeFlag::WriteOnly)){\n            // should never happen\n            logCritical << \"Failed to open pidfile\" << pidPath;\n            exit(1);\n        }\n        QTextStream stream(&pidFile);\n        stream << os::getpid();\n    }\n    pidFile.close();\n    lockFile.unlock();\n\n    // we are a daemon. Good practice: enter /, close all fds\n    os::chdir(\"/\");\n    logger::disableLogToFile();\n    for(const int fd : osutil::FdEntries()){\n        try {\n            os::close(fd);\n        } catch (const os::ExcOs& e) {\n            QIErr() << e.what();\n        }\n    }\n\n    // wait for typical signals to exit\n    osutil::waitForSignals();\n    pidFile.remove();\n    exit(0);\n}\n\n[[noreturn]]\nvoid execAsRealsUser(const char* filename, char *commandArgv[], char **envp){\n    os::setuid(os::getuid());\n    os::exec(filename, commandArgv, envp);\n    // never get here\n}\n\n} // namespace\n\n/// Create a grandchild-process, which creates a pid-file (in a race-free way).\nvoid orig_mountspace_process::setupIfNotExist()\n{\n    const QString pidPath = userPidPath();\n    if(QFileInfo::exists(pidPath)){\n        return;\n    }\n    auto pid = os::fork();\n    if(pid != 0){\n        // parent returns\n        return;\n    }\n    // Prevent receiving signals for the process-group\n    os::setsid();\n    setupIfNotExistAsChild(pidPath);\n    // never get here\n}\n\n\n/// Enter a 'original' mount-namespace which was created before (setupIfNotExist)\n/// and excute a command in it. 
If there is no pid-file, assume, there is no such\n/// process yet and simply execve as 'real' user.\n/// In case of an error (pid-file points to invalid process, etc.) remove\n/// the pid-file and execve as 'real' user.\nvoid orig_mountspace_process::msenterOrig(const char *filename,\n                                          char *commandArgv[], char **envp)\n{\n    QFile pidFile(userPidPath());\n    if(! pidFile.open(QFile::OpenModeFlag::ReadOnly)){\n        // may happen, if file does not exist (we are the first process).\n        // If it exists, report the error (yes, short race-condition here...)\n        if(pidFile.exists()){\n            logWarning << qtr(\"Failed to open pidfile although\"\n                              \" it exists, not joining original namespace:\") << pidFile.fileName();\n        } else {\n            logDebug << \"pidPath does not exist, not joining original namespace\";\n        }\n        execAsRealsUser(filename, commandArgv, envp);\n    }\n\n    QTextStream stream(&pidFile);\n    try {\n        auto targetPid = qVariantTo_throw<pid_t>(stream.readLine());\n        int targetprocDirFd = os::open(\"/proc/\" + std::to_string(targetPid), O_DIRECTORY);\n        auto closeTargetprocDirFdLater = finally([&targetprocDirFd] {\n            closeVerbose(targetprocDirFd);\n        });\n        msenter::run(filename, commandArgv, envp, targetprocDirFd);\n    } catch (const std::exception& e) {\n        logWarning << qtr(\"Cannot join original mount-namespace: %1. \"\n                          \"Maybe shournal-run was killed? Removing obsolete \"\n                          \"pid-file at %2...\").arg(e.what()).arg(pidFile.fileName());\n        if(! pidFile.remove()){\n            logWarning << qtr(\"Removing pid-file %2 failed.\").arg(pidFile.fileName());\n        }\n        execAsRealsUser(filename, commandArgv, envp);\n    }\n}\n"
  },
  {
    "path": "src/shournal-run-fanotify/orig_mountspace_process.h",
    "content": "#pragma once\n\n\nnamespace orig_mountspace_process {\n\n    void setupIfNotExist();\n    [[noreturn]]\n    void msenterOrig(const char* filename, char *commandArgv[], char **envp);\n\n}\n\n\n"
  },
  {
    "path": "src/shournal-run-fanotify/shournal-run-fanotify.cpp",
    "content": "#include <csignal>\n#include <fcntl.h>\n#include <cassert>\n\n#include <QCoreApplication>\n#include <QVarLengthArray>\n\n\n#include \"qoptargparse.h\"\n#include \"qoptvarlenarg.h\"\n#include \"excoptargparse.h\"\n#include \"os.h\"\n#include \"osutil.h\"\n#include \"excos.h\"\n#include \"filewatcher_fan.h\"\n#include \"msenter.h\"\n#include \"logger.h\"\n#include \"fdcommunication.h\"\n\n\n#include \"exccfg.h\"\n#include \"settings.h\"\n#include \"util.h\"\n#include \"qoutstream.h\"\n#include \"util.h\"\n#include \"translation.h\"\n#include \"app.h\"\n#include \"qexcdatabase.h\"\n#include \"mount_controller.h\"\n#include \"orig_mountspace_process.h\"\n#include \"cpp_exit.h\"\n#include \"db_connection.h\"\n#include \"storedfiles.h\"\n#include \"socket_message.h\"\n#include \"shournal_run_common.h\"\n\nusing fdcommunication::SocketCommunication;\nusing socket_message::E_SocketMsg;\nusing namespace shournal_run_common;\n\nnamespace  {\n\n/// Uncaught exception handler\nvoid onterminate() {\n    try {\n        auto unknown = std::current_exception();\n        if (unknown) {\n            std::rethrow_exception(unknown);\n        }\n    } catch (const std::exception& e) {\n        logCritical << e.what() << \"\\n\";\n    } catch (...) 
{\n        logCritical << \"unknown exception occurred\\n\";\n    }\n}\n\n[[noreturn]]\nvoid callFilewatcherSafe(FileWatcher& fwatcher){\n\n    try {\n        fwatcher.run();\n    } catch (const os::ExcOs & ex) {\n        logCritical << qtr(\"Sorry, need to close: \") << ex.what();\n    } catch(const ExcMountCtrl & ex){\n        logCritical << qtr(\"mount failed: \") << ex.descrip();\n    }\n    if(fwatcher.sockFd() != -1){\n        SocketCommunication fdCom;\n        fdCom.setSockFd(fwatcher.sockFd());\n        fdCom.sendMsg(int(E_SocketMsg::SETUP_FAIL));\n    }\n    cpp_exit(1);\n}\n\n} //  namespace\n\n\nint shournal_run_main(int argc, char *argv[])\n{    \n    // Since we are waiting for other processes to finish, ignore typical\n    // signals.\n    osutil::setInertSighandler(os::catchableTermSignals());\n\n    // Using app::SHOURNAL for several common paths (database, config) used\n    // by QStandardPaths but app::CURRENT_NAME for others (log-filename)\n    app::setupNameAndVersion(app::SHOURNAL_RUN_FANOTIFY);\n\n    if(! translation::init()){\n        logWarning << \"Failed to initialize translation\";\n    }\n    if(os::geteuid() != 0){\n        QIErr() << qtr(\"%1 seems to lack the suid-bit (SETUID) for root. You can correct \"\n                       \"that by\\n\"\n                       \"chown root %1 && chmod u+s %1\").arg(app::CURRENT_NAME);\n        // but continue to allow for e.g. msenter-orig to exec the process anyway...\n    }\n\n\n    logger::setup(app::CURRENT_NAME);\n\n    std::set_terminate(onterminate);\n\n    if(! shournal_common_init()){\n        logCritical << qtr(\"Fatal error: failed to initialize custom Qt conversion functions\");\n        cpp_exit(1);\n    }\n\n    // ignore first arg (command to this app)\n    --argc;\n    ++argv;\n    QOptArgParse parser;\n    parser.setHelpIntroduction(qtr(\"Observation backend for <%1>. 
\"\n                                   \"Not meant to be called by users directly, \"\n                                   \"you would rather call %1 without trailing '-run'.\"\n                                      ).arg(app::SHOURNAL) + \"\\n\");\n    QOptArg argVersion(\"v\", \"version\", qtr(\"Display version\"), false);\n    parser.addArg(&argVersion);\n\n    // for communication with the shell-integration:\n    QOptArg argSocketFd(\"\", \"socket-fd\", \"\" );\n    argSocketFd.setInternalOnly(true);\n    parser.addArg(&argSocketFd);\n\n    QOptArg argExec(\"e\", \"exec\", qtr(\"Execute and observe the passed program \"\n                                     \"and its arguments (this argument has to be last).\"),\n                    false);\n    argExec.setFinalizeFlag(true);\n    parser.addArg(&argExec);\n\n    QOptArg argExecFilename(\"\", \"exec-filename\", qtr(\"This is an advanced option. \"\n                                                     \"In most cases the first argument of a \"\n                                                     \"program is the program name. For \"\n                                                     \"example for login-shells this does \"\n                                                     \"not have to be the case. If this \"\n                                                     \"argument is provided, that filename \"\n                                                     \"is used instead of argv[0]\"));\n    parser.addArg(&argExecFilename);\n    argExecFilename.addRequiredArg(&argExec);\n\n    // Some variables like $TMPDIR are cleared in setuid-programs.\n    // We allow them to be passed within argv and apply them during execve.\n    // *after* setuid to original user.\n    // see e.g. http://lists.gnu.org/archive/html/bug-glibc/2003-08/msg00076.html\n    QOptVarLenArg argEnv(\"\", \"env\", qtr(\"Specify an arbitrary number of environment variables. 
\"\n                                        \"The first entry (integer) specifies the count \"\n                                        \"of all the latter. The last entry is used internally and \"\n                                        \"must be the string 'SHOURNAL_DUMMY_NULL=1'\"));\n    parser.addArg(&argEnv);\n\n    // TODO: currently only interface compatibility.\n    // We could indeed fork as well, at least in the exec-case.\n    QOptArg argFork(\"\", \"fork\",\n                         qtr(\"NOT USED\"), false);\n    argFork.setInternalOnly(true);\n    parser.addArg(&argFork);\n\n    QOptArg argTmpDir(\"\", \"tmpdir\",\n                      qtr(\"Use the given TMPDIR (for non security-relevant stuff). \"\n                          \"As a setuid binary some variables are cleared from \"\n                          \"the environment (see also man 8 ld.so)\"));\n    // We expect to be called by the binary 'shournal' or the\n    // shell integration, which both pass $TMPDIR using --tmpdir, so no need\n    // to make it public.\n    argTmpDir.setInternalOnly(true);\n    parser.addArg(&argTmpDir);\n\n    QOptArg argMsenter(\"\", \"msenter\", qtr(\"<pid>. Must be passed along with '%1'. Execute the \"\n                                          \"given command in an existing mountspace which was \"\n                                          \"previously created via %2 %1\")\n                            .arg(argExec.name(), app::CURRENT_NAME));\n    argMsenter.addRequiredArg(&argExec);\n    parser.addArg(&argMsenter);\n\n    QOptArg argMsenterOrig(\"\", \"msenter-orig-mountspace\",\n                           qtr(\"Must be passed along with '%1'. 
Execute the \"\n                               \"given command in the 'original' mount-namespace \"\n                               \"created the first time %2 observed a process.\")\n                            .arg(argExec.name(), app::CURRENT_NAME), false);\n    argMsenterOrig.addRequiredArg(&argExec);\n    parser.addArg(&argMsenterOrig);\n\n    QOptArg argVerbosity(\"\", \"verbosity\", qtr(\"How much shall be printed to stderr. Note that \"\n                                              \"for 'dbg' shournal-run must not be a 'Release'-build.\"));\n    argVerbosity.setAllowedOptions(app::VERBOSITIES);\n    parser.addArg(&argVerbosity);\n\n    QOptArg argShellSessionUUID(\"\", \"shell-session-uuid\", qtr(\"uuid as base64-encoded string\"));\n    argShellSessionUUID.setInternalOnly(true);\n    parser.addArg(&argShellSessionUUID);\n\n    QOptArg argNoDb(\"\", \"no-db\", qtr(\"For debug purposes: do not write to\"\n                                     \"database after event processing\"), false);\n    parser.addArg(&argNoDb);\n\n    QOptArg argPrintSummary(\"\", \"print-summary\",\n                         qtr(\"Print a short summary after \"\n                             \"event processing finished.\"), false);\n    parser.addArg(&argPrintSummary);\n    auto argCfgDir = mkarg_cfgdir();\n    parser.addArg(&argCfgDir);\n    auto argDataDir = mkarg_datadir();\n    parser.addArg(&argDataDir);\n\n    try {\n        parser.parse(argc, argv);\n\n        if(argVerbosity.wasParsed()){\n            QByteArray verbosity = argVerbosity.getOptions(1).first().toLocal8Bit();\n            logger::setVerbosityLevel(verbosity.constData());\n        } else {\n            logger::setVerbosityLevel(QtMsgType::QtWarningMsg);\n        }\n\n        char** cmdEnv;\n        if(argEnv.wasParsed()){\n            cmdEnv = argEnv.vals().argv;\n            auto envArgc = argEnv.vals().len;\n            assert(strcmp(cmdEnv[envArgc-1], \"SHOURNAL_DUMMY_NULL=1\" ) == 0);\n            
cmdEnv[envArgc-1] = nullptr;\n        } else {\n            cmdEnv = environ;\n        }\n\n        char* cmdFilename = nullptr;\n        char** cmdArgv = nullptr;\n        if(argExec.wasParsed()){\n            cmdArgv = parser.rest().argv;\n            if(argExecFilename.wasParsed()){\n                cmdFilename = argExecFilename.vals().argv[0];\n            } else {\n                cmdFilename = parser.rest().argv[0];\n            }\n        }        \n\n        // Don't waste time, msenter has to run as early as possible\n        if(argMsenter.wasParsed()){\n            msenter::run(argMsenter.getValue<pid_t>(), cmdFilename, cmdArgv, cmdEnv);\n        }\n\n        if(argExec.wasParsed() &&\n                argSocketFd.wasParsed() ) {\n            QIErr() << qtr(\"%1 and %2 are mutually exclusive\").arg(argExec.name(), argSocketFd.name());\n            cpp_exit(1);\n        }\n\n        if(argVersion.wasParsed()){\n            QOut() << app::CURRENT_NAME << qtr(\" version \") << app::version().toString() << \"\\n\";\n            cpp_exit(0);\n        }\n\n        auto & sets = Settings::instance();\n        if(argCfgDir.wasParsed()){\n            sets.setUserCfgDir(argCfgDir.getValue<QString>());\n        }\n        if(argDataDir.wasParsed()){\n            sets.setUserDataDir(argDataDir.getValue<QString>());\n        }\n\n        // has to be before argExec\n        if(argMsenterOrig.wasParsed()){\n            // do not crash here if called from shell-integration, if\n            // we forgot to make shournal suid...\n            bool weAreAnotheUser = os::geteuid() != os::getuid();\n            if(weAreAnotheUser) os::seteuid(os::getuid());\n            logger::enableLogToFile(app::CURRENT_NAME);\n            if(weAreAnotheUser) os::seteuid(0);\n\n            // [[noreturn]]\n            orig_mountspace_process::msenterOrig(cmdFilename, cmdArgv, cmdEnv);\n        }\n\n        // ------------------------------  //\n\n        // In file observation mode (or 
invalid commandline-input)\n\n        // Starting from this line, we effectively work as non-root\n        // and only (shortly) switch to root to unshare the mount namespace\n        // and initialize fanotify.\n        // Also load settings and enable logging already, *before* unsharing the\n        // mount-namespace, so we do not log events, we create ourselves.\n        os::seteuid(os::getuid());\n\n        FileWatcher fwatcher;\n        fwatcher.setCommandEnvp(cmdEnv);\n        if(argExecFilename.wasParsed()){\n            // fwatcher command-filename must otherwise be null, to allow\n            // for correct storing of command in db (no duplicate first arg\n            // if not necessary!)\n            fwatcher.setCommandFilename(argExecFilename.vals().argv[0]);\n        }\n        if(argTmpDir.wasParsed()){\n            fwatcher.setTmpDir(argTmpDir.getValue<QByteArray>());\n        }\n\n        fwatcher.setStoreToDatabase(! argNoDb.wasParsed());\n        fwatcher.setPrintSummary(argPrintSummary.wasParsed());\n\n\n        try {\n            logger::enableLogToFile(app::CURRENT_NAME);\n            sets.load();\n            StoredFiles::mkpath();\n        } catch(const qsimplecfg::ExcCfg & ex){\n            QIErr() << qtr(\"Failed to load config file: \") << ex.descrip();\n            cpp_exit(1);\n        } catch(const QExcDatabase & ex){\n            QIErr() << qtr(\"Database-operation failed: \") << ex.descrip();\n            cpp_exit(1);\n        }  catch (const QExcIo& ex){\n            logCritical << qtr(\"IO-operation failed: \") << ex.descrip();\n            cpp_exit(1);\n        } catch (const os::ExcOs& ex){\n            logCritical << ex.what();\n            cpp_exit(1);\n        }\n\n        if(argShellSessionUUID.wasParsed()){\n            fwatcher.setShellSessionUUID(\n                        QByteArray::fromBase64(argShellSessionUUID.getValue<QByteArray>()));\n        }\n\n        if(argSocketFd.wasParsed()){\n            int socketFd = 
argSocketFd.getValue<int>(-1);\n            os::setFdDescriptorFlags(socketFd, FD_CLOEXEC);\n            fwatcher.setSockFd(socketFd);\n            callFilewatcherSafe(fwatcher); // [[noreturn]]\n        }\n\n        if(argExec.wasParsed()){\n            assert(!argMsenterOrig.wasParsed());\n            auto externCmd = parser.rest();\n            fwatcher.setArgv(externCmd.argv, externCmd.len);\n            callFilewatcherSafe(fwatcher); // [[noreturn]]\n        }\n\n        if(parser.rest().len != 0){\n            QIErr() << qtr(\"Invalid parameters passed: %1.\\n\"\n                           \"Show help with --help\").\n                       arg( argvToQStr(parser.rest().len, parser.rest().argv));\n            cpp_exit(1);\n        }\n        QIErr() << \"No action specified\";\n\n    } catch (const ExcOptArgParse & ex) {\n        QIErr() << qtr(\"Commandline seems to be erroneous:\")\n                << ex.descrip();\n    }\n    cpp_exit(1);\n\n\n}\n\nint main(int argc, char *argv[])\n{\n    try {\n        shournal_run_main(argc, argv);\n    } catch (const ExcCppExit& e) {\n        return e.ret();\n    }\n}\n\n\n\n"
  },
  {
    "path": "test/CMakeLists.txt",
    "content": "\n\ninclude_directories(\n    ../kernel\n    ../src/common\n    ../src/common/qsimplecfg\n    ../src/common/oscpp\n    ../src/common/util\n    ../src/common/qsqlthrow\n    )\n\nenable_testing()\n\n\nfind_package(Qt5Test REQUIRED)\n\nadd_definitions( -DSHOURNALTEST_SQLITE_v_2_2=\"${CMAKE_CURRENT_SOURCE_DIR}/sqlite_sample_db_v2_2\")\n\nadd_executable(runTests\n    main.cpp\n    autotest.h\n    test_cfg.cpp\n    test_pathtree.cpp\n    test_db_controller.cpp\n    test_cxxhash.cpp\n    test_fileeventhandler.cpp\n    test_fdcommunication.cpp\n    test_osutil.cpp\n    test_qformattedstream.cpp\n    test_qoptargparse.cpp\n    test_util.cpp\n    integration_test_shell.cpp\n    helper_for_test.cpp\n)\n\nadd_test(NAME tests COMMAND runTests)\n\ntarget_link_libraries(runTests\n    Qt5::Test\n    lib_shournal_common\n    )\n\n\n# run tests post build:\n# add_custom_command( TARGET runTests\n#      COMMENT \"Run tests\"\n#      POST_BUILD\n#      WORKING_DIRECTORY ${CMAKE_BINARY_DIR}\n#      COMMAND runTests\n# )\n\nset(CMAKE_AUTOMOC ON)\nset(CMAKE_INCLUDE_CURRENT_DIR ON)\n\n"
  },
  {
    "path": "test/autotest.h",
    "content": "\n#pragma once\n\n#include <QTest>\n#include <QList>\n#include <QString>\n#include <QSharedPointer>\n#include <QStandardPaths>\n\n#include <memory>\n\n#include \"compat.h\"\n#include \"qoutstream.h\"\n#include \"subprocess.h\"\n#include \"qoptargparse/qoptargparse.h\"\n#include \"app.h\"\n#include \"helper_for_test.h\"\n#include \"settings.h\"\n#include \"logger.h\"\n\n\nclass ShournalTestGlobals {\npublic:\n    subprocess::Args_t integrationShellArgs;\n    std::string integrationSetupCommand;\n};\n\n\n\nnamespace AutoTest\n{\n\ninline ShournalTestGlobals& globals(){\n    static ShournalTestGlobals globals;\n    return globals;\n}\n\n\ntypedef QList<QObject*> TestList;\n\ninline TestList& testList()\n{\n    static TestList list;\n    return list;\n}\n\ninline bool findObject(QObject* object)\n{\n    TestList& list = testList();\n    if (list.contains(object))\n    {\n        return true;\n    }\n    foreach (QObject* test, list)\n    {\n        if (test->objectName() == object->objectName())\n        {\n            return true;\n        }\n    }\n    return false;\n}\n\ninline void addTest(QObject* object)\n{\n    TestList& list = testList();\n    if (!findObject(object))\n    {\n        list.append(object);\n    }\n}\n\ninline int run(int argc, char *argv[])\n{\n    if(! shournal_common_init()){\n        QIErr() << qtr(\"Fatal error: failed to initialize custom Qt conversion functions\");\n        exit(1);\n    }\n    logger::setup(\"shournal-test\");\n    logger::setVerbosityLevel(QtMsgType::QtWarningMsg);\n\n    // ignore first arg (command to this app)\n    --argc;\n    ++argv;\n\n    QOptArgParse parser;\n    QOptArg argVerbosity(\"\", \"verbosity\", qtr(\"How much shall be printed to stderr. 
Note that \"\n                                              \"for 'dbg' shournal must not be a 'Release'-build, \"\n                                              \"dbg-messages are lost in Release-mode.\"));\n    argVerbosity.setAllowedOptions({\"dbg\",\n                                #if QT_VERSION >= QT_VERSION_CHECK(5, 5, 0)\n                                    \"info\",\n                                #endif\n                                    \"warning\", \"critical\"});\n    parser.addArg(&argVerbosity);\n\n    QOptArg argIntegrationTest(\"\", \"integration\",\n                               \"Run integration tests, instead of normal tests\", false);\n    parser.addArg(&argIntegrationTest);\n\n\n    QOptArg argShell(\"\", \"shell\", \"The shell used for the integration tests, including\"\n                                  \" arguments, separated by whitespace\");\n    argShell.addRequiredArg(&argIntegrationTest);\n    parser.addArg(&argShell);\n\n    parser.parse(argc, argv);\n\n    if(argVerbosity.wasParsed()){\n        QByteArray verbosity = argVerbosity.getOptions(1).first().toLocal8Bit();\n        logger::setVerbosityLevel(verbosity.constData());\n    }\n\n    if(argIntegrationTest.wasParsed()){\n        os::setenv<QByteArray>(\"_SHOURNAL_IN_INTEGRATION_TEST_MODE\", \"true\");\n        app::setupNameAndVersion(\"shournal-integration-test\");\n        if(! app::inIntegrationTestMode()){\n            QIErr() << \"Failed to enable integration test mode.\";\n            exit(1);\n        }\n\n        if(! 
argShell.wasParsed()){\n            QIErr() << \"missing argument\" << argShell.name();\n            exit(1);\n        }\n        const auto shellArgs = argShell.getValue<QString>().split(\" \", Qt::SkipEmptyParts);\n        if(shellArgs.first().startsWith(\"bash\")){\n            globals().integrationSetupCommand = \"export HISTFILE=/dev/null\";\n        } else if(shellArgs.first().startsWith(\"zsh\")){\n            globals().integrationSetupCommand =\n                    \"unset HISTFILE\\n\"\n                    // search for 42 in integration_test_shell.cpp for the rationale\n                    \"[ $_shournal_run_backend='shournal-run-fanotify' ] && \"\n                    \"[ -z \\\"${_shournal_is_running+x}\\\" ] && exit 42\\n\";\n        } else {\n            QIErr() << \"currently only bash and zsh are supported.\";\n            exit(1);\n        }\n\n        for(const QString& s : shellArgs){\n            globals().integrationShellArgs.push_back(s.toStdString());\n        }\n    } else {\n        QCoreApplication::setApplicationName(QString(app::SHOURNAL) + \"-test\");\n        QCoreApplication::setApplicationVersion( app::version().toString());\n    }\n\n    QStandardPaths::setTestModeEnabled(true);\n    // delete remaining paths from last test (if any)\n    testhelper::deletePaths();\n\n    int ret = 0;\n\n    foreach (QObject* test, testList())\n    {\n        if(argIntegrationTest.wasParsed()){\n            if(! test->objectName().startsWith(\"IntegrationTest\")){\n                continue;\n            }\n        } else{\n            if(test->objectName().startsWith(\"IntegrationTest\")){\n                continue;\n            }\n\n        }\n        ret += QTest::qExec(test, {});\n    }\n    if(ret != 0){\n        QErr() << \"\\n**** AT LEAST ONE TEST FAILED! 
****\\n\\n\";\n    }\n\n    return ret;\n}\n}\n\ntemplate <class T>\nclass Test\n{\npublic:\n    QSharedPointer<T> child;\n\n    Test(const QString& name) : child(new T)\n    {\n        child->setObjectName(name);\n        AutoTest::addTest(child.data());\n    }\n};\n\n#define DECLARE_TEST(className) static Test<className> t(#className);\n\n#define TEST_MAIN \\\n    int main(int argc, char *argv[]) \\\n{ \\\n    return AutoTest::run(argc, argv); \\\n    }\n\n"
  },
  {
    "path": "test/helper_for_test.cpp",
    "content": "\n#include <QStandardPaths>\n#include <QCoreApplication>\n#include <QDebug>\n#include <QTextStream>\n\n#include \"helper_for_test.h\"\n#include \"util.h\"\n#include \"app.h\"\n#include \"exccommon.h\"\n#include \"qfilethrow.h\"\n\nnamespace  {\n\nconst QList<QStandardPaths::StandardLocation>& locations(){\n    static const QList<QStandardPaths::StandardLocation> locs = {\n        QStandardPaths::ConfigLocation,\n        QStandardPaths::DataLocation,\n        QStandardPaths::CacheLocation};\n    return locs;\n}\n\n\n} //  namespace\n\n/// Set application-name in a unique way and enable test mode in QStandardPaths,\n/// so application stuff is saved somewhere else. In the end, remove the\n/// respective directories (cleanupPaths).\nvoid testhelper::setupPaths()\n{\n    for(const auto& l : locations()){\n        const QString path = QStandardPaths::writableLocation(l);\n        QDir d(path);\n        if( ! d.mkpath(path)){\n            throw QExcIo(QString(\"Failed to create %1\").arg(path));\n        }\n    }\n}\n\n\nvoid testhelper::deletePaths()\n{\n    if(! QStandardPaths::isTestModeEnabled()){\n        throw QExcProgramming(QString(__func__) + \" called while test mode disabled\");\n    }\n    for(const auto& l : locations()){\n        const QString path = QStandardPaths::writableLocation(l);\n        QDir d(path);\n        d.removeRecursively();\n    }\n}\n\nvoid testhelper::deleteDatabaseDir()\n{\n    if(! QStandardPaths::isTestModeEnabled()){\n        throw QExcProgramming(QString(__func__) + \" called while test mode disabled\");\n    }\n    const QString path = QStandardPaths::writableLocation(QStandardPaths::DataLocation);\n    QDir d(path);\n    d.removeRecursively();\n}\n\n\nstd::shared_ptr<QTemporaryDir> testhelper::mkAutoDelTmpDir()\n{\n    auto pDir = std::make_shared<QTemporaryDir>();\n    if (! 
pDir->isValid()) {\n         throw QExcIo(\"Failed to mk temp dir\");\n    }\n    pDir->setAutoRemove(true);\n    return pDir;\n}\n\nvoid testhelper::writeStringToFile(const QString &filepath, const QString &str)\n{\n    QFileThrow f(filepath);\n    f.open(QFile::WriteOnly | QFile::Text);\n\n    QTextStream stream(&f);\n    stream << str;\n}\n\n/// Write repeated string pattern of len to the file at path\nvoid testhelper::writeStuffToFile(const QString &fpath, int len){\n    const QByteArray stuff(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\");\n    QFileThrow f(fpath);\n    f.open(QFile::WriteOnly | QFile::Text);\n    for(int i=0; i < len / stuff.size(); i++){\n        f.write(stuff);\n    }\n    int rest = len % stuff.size();\n    if(rest){\n        auto stuffrest = QByteArray::fromRawData(stuff, rest);\n        f.write(stuffrest);\n    }\n\n}\n\nQString testhelper::readStringFromFile(const QString &fpath)\n{\n    QFileThrow f(fpath);\n    f.open(QFile::ReadOnly | QFile::Text);\n\n    QTextStream stream(&f);\n    return stream.readAll();\n}\n\n\nbool testhelper::copyRecursively(const QString &srcFilePath,\n                                 const QString &tgtFilePath)\n{\n    QFileInfo srcFileInfo(srcFilePath);\n    if (srcFileInfo.isDir()) {\n        QDir targetDir(tgtFilePath);\n        targetDir.cdUp();\n        if (!targetDir.mkdir(QFileInfo(tgtFilePath).fileName()))\n            return false;\n        QDir sourceDir(srcFilePath);\n        QStringList fileNames = sourceDir.entryList(QDir::Files | QDir::Dirs | QDir::NoDotAndDotDot | QDir::Hidden | QDir::System);\n        foreach (const QString &fileName, fileNames) {\n            const QString newSrcFilePath\n                    = srcFilePath + QLatin1Char('/') + fileName;\n            const QString newTgtFilePath\n                    = tgtFilePath + QLatin1Char('/') + fileName;\n            if (!copyRecursively(newSrcFilePath, newTgtFilePath))\n                return false;\n        }\n    } else {\n        if 
(!QFile::copy(srcFilePath, tgtFilePath))\n            return false;\n    }\n    return true;\n}\n"
  },
  {
    "path": "test/helper_for_test.h",
    "content": "#pragma once\n\n#include <QTemporaryDir>\n#include <memory>\n\nnamespace testhelper {\nvoid setupPaths();\nvoid deletePaths();\nvoid deleteDatabaseDir();\n\nstd::shared_ptr<QTemporaryDir> mkAutoDelTmpDir();\n\nvoid writeStringToFile(const QString& filepath, const QString& str);\nvoid writeStuffToFile(const QString &fpath, int len);\n\nQString readStringFromFile(const QString& fpath);\n\nbool copyRecursively(const QString &srcFilePath,\n                     const QString &tgtFilePath);\n\n}\n\n\n\n"
  },
  {
    "path": "test/integration_test_shell.cpp",
    "content": "\n#include <QDebug>\n\n#include \"autotest.h\"\n#include \"qoutstream.h\"\n#include \"util.h\"\n#include \"osutil.h\"\n#include \"helper_for_test.h\"\n#include \"database/db_connection.h\"\n#include \"database/db_controller.h\"\n#include \"database/file_query_helper.h\"\n#include \"database/query_columns.h\"\n#include \"qsimplecfg/cfg.h\"\n#include \"settings.h\"\n#include \"database/storedfiles.h\"\n#include \"safe_file_update.h\"\n\nusing subprocess::Subprocess;\nusing db_controller::QueryColumns;\n\nnamespace  {\n\nvoid writeLine(int fd, const std::string& line){\n    os::write(fd, line + \"\\n\");\n}\n\nos::Pipes_t prepareHighFdNumberPipe(){\n    auto pipe_ = os::pipe(0, false); // CLOEXEC irrelevant, dup2 below...\n    int highFd = osutil::findHighestFreeFd();\n    os::dup2(pipe_[0], highFd);\n    os::close(pipe_[0]);\n    pipe_[0] = highFd;\n\n    highFd = osutil::findHighestFreeFd(highFd - 1);\n    os::dup2(pipe_[1], highFd);\n    os::close(pipe_[1]);\n    pipe_[1] = highFd;\n    os::setenv<QByteArray>(\"_SHOURNAL_INTEGRATION_TEST_PIPE_FD\",\n               QByteArray::number(pipe_[1]));\n\n    return pipe_;\n}\n\n\n/// @return write-end of the pipe passed to the shell-process\nint callWithRedirectedStdin(Subprocess& proc){\n    int oldStdIn = os::dup(STDIN_FILENO); // CLOEXEC irrelevant, dup2 below...\n    auto pipe_ = os::pipe(0);\n    os::dup2(pipe_[0], STDIN_FILENO);\n    os::close(pipe_[0]);\n\n    proc.call(AutoTest::globals().integrationShellArgs);\n    // restore stdin\n    os::dup2(oldStdIn, STDIN_FILENO);\n    os::close(oldStdIn);\n\n    return pipe_[1];\n}\n\n\n} // anonymous namespace\n\n\nclass IntegrationTestShell : public QObject {\n    Q_OBJECT\n\nprivate:\n    void writeReadSettingsToCfg(const QString& readIncludeDir,\n                                qsimplecfg::Cfg& cfg){\n        auto sectRead = cfg[Settings::SECT_READ_NAME];\n        sectRead->getValue(Settings::SECT_READ_KEY_ENABLE, true, true);\n        
sectRead->getValue(Settings::SECT_READ_KEY_INCLUDE_PATHS, readIncludeDir, true);\n    }\n\n    void writeScriptSettingToCfg(const QString& includePath,\n                                 const QStringList& fileExtensions,\n                                 qsimplecfg::Cfg& cfg){\n        auto sectRead = cfg[Settings::SECT_SCRIPTS_NAME];\n        sectRead->getValue(Settings::SECT_SCRIPTS_ENABLE, true, true);\n        sectRead->getValue(Settings::SECT_SCRIPTS_INCLUDE_PATHS, includePath, true);\n        sectRead->getValues(Settings::SECT_SCRIPTS_INCLUDE_FILE_EXTENSIONS, fileExtensions, true, \"\\n\");\n    }\n\n\n    /// @param cmd: the command to be executed\n    /// @param setupCommand: command executed before SHOURNAL_ENABLE\n    void executeCmdInbservedShell(const std::string& cmd, const std::string& setupCommand){\n        auto pipe_ = prepareHighFdNumberPipe();\n        Subprocess proc;\n        // pass pipe write end -> wait for async shournal grand-child process\n        proc.setForwardFdsOnExec({pipe_[1]});\n        int writeFd = callWithRedirectedStdin(proc);\n        os::close(pipe_[1]);\n\n        if(! setupCommand.empty()){\n            writeLine(writeFd, setupCommand);\n        }\n        writeLine(writeFd, \"SHOURNAL_ENABLE\");\n        writeLine(writeFd, \"SHOURNAL_SET_VERBOSITY \" +\n                  std::string(logger::msgTypeToStr(logger::getVerbosityLevel())));\n\n        writeLine(writeFd, cmd);\n        writeLine(writeFd, \"SHOURNAL_DISABLE\");\n        writeLine(writeFd, \"exit 123\");\n\n        os::close(writeFd);\n        auto proc_ret = proc.waitFinish();\n        if(proc_ret == 42){\n            QIErr() << QString::fromUtf8(\"As of zsh 5.7.1 the fanotify shell integration backend \"\n                       \"*must* be enabled during zsh-startup (e.g .zshrc) for the \"\n                       \"integration-tests to succeed. 
Otherwise the first zsh-process \"\n                       \"*may* consume stdin so no commands remain after «exec zsh» \"\n                       \"which is called to preload libshournal-shellwatch.so. \"\n                       \"See also my email 'Unexpected stdin-behavior' from 2021-10-21 \"\n                       \"on the zsh mailing list zsh-workers@zsh.org. Note that zsh \"\n                       \"does not follow posix shell behaviour here: \"\n                       \"«When the shell is using \"\n                       \"standard input and it invokes a command that also uses standard \"\n                       \"input, the shell shall ensure that the standard input file \"\n                       \"pointer points directly after the command it has read when \"\n                       \"the command begins execution».\");\n            throw QExcIllegalArgument(\"Bad integration-test environment (see above)\");\n        }\n\n        QCOMPARE(proc_ret, 123);\n        char c;\n        // wait for shournal grand-child process to finish (close it's write end)\n        os::read(pipe_[0], &c, 1);\n        os::close(pipe_[0]);\n    }\n\n    /// @overload\n    void executeCmdInbservedShell(const QString& cmd, const std::string& setupCommand){\n        executeCmdInbservedShell(cmd.toStdString(), setupCommand);\n    }\n\n\n\n    void cmdWrittenFileCheck(const std::string& cmd, const std::string& fpath,\n                             const std::string& setupCommand){\n        executeCmdInbservedShell(cmd, setupCommand);\n        auto query = file_query_helper::buildFileQuerySmart(\n                    QString::fromStdString(fpath), false);\n        auto cmdIter = db_controller::queryForCmd(query);\n        auto dbCleanup = finally([] { db_connection::close(); });\n        QVERIFY(cmdIter->next());\n        QCOMPARE(cmdIter->value().text, QString::fromStdString(cmd));\n        QVERIFY(QFile(QString::fromStdString(fpath)).remove());\n    }\n\nprivate slots:\n\n    void 
initTestCase(){\n        logger::setup(__FILE__);\n    }\n\n    /// Called for each test.\n    void init(){\n        testhelper::deletePaths();\n        // Load settings and delete the config-file. That way,\n        // The version of the cfg-file is also set appropriately.\n        Settings::instance().load();\n        QFile(Settings::instance().cfgFilepath()).remove();\n    }\n\n    void cleanup(){\n\n    }\n\n\n\n    void testWrite() {\n        auto pTmpDir = testhelper::mkAutoDelTmpDir();\n        auto tmpDirPath = pTmpDir->path().toStdString();\n        QVERIFY(tmpDirPath != \"/\"); // otherwise this test must be changed\n        auto tmpDirNoLeadingSlash(tmpDirPath);\n        tmpDirNoLeadingSlash.erase(tmpDirNoLeadingSlash.begin());\n\n        auto r510 = pTmpDir->path() + \"/510\";\n        testhelper::writeStuffToFile(r510, 510);\n\n        auto r4096 = pTmpDir->path() + \"/4096\";\n        testhelper::writeStuffToFile(r4096, 4096);\n\n        auto r21567 = pTmpDir->path() + \"/21567\";\n        testhelper::writeStuffToFile(r21567, 21567);\n\n        auto r101978 = pTmpDir->path() + \"/101978\";\n        testhelper::writeStuffToFile(r101978, 101978);\n\n\n        std::string filepath = tmpDirPath + \"/f1\";\n        std::vector<std::string> cmds {\n                    \"echo '%' > \" + filepath, // percent unveiled a printf format bug in shournal 0.7\n                    \"(echo foo2 ) > \" + filepath,\n                    \"(echo foo3 > \" + filepath + \")\",\n                    \"/bin/echo foo4 > \" + filepath,\n                    \"sh -c 'echo foo5 > \" + filepath + \"'\",\n                    \"echo foo6 > \" + filepath + \" & wait\",\n                    \"(echo foo7 & wait ) > \" + filepath,\n                    \"(echo foo8 > \" + filepath + \") & wait\",\n                    \"/bin/echo foo9 > \" + filepath + \" & wait\",\n                    \"sh -c 'echo foo10 > \" + filepath + \" & wait'\",\n                    // malformed filepath with 
multiple slash //\n                    \"echo foo11 > //\" + filepath,\n                    // special case root dir\n                    \"cd /; echo foo11 > //\" + filepath,\n                    // relative paths must also work:\n                    \"cd \" + tmpDirPath + \"; echo hi > f1\",\n                    \"cd \" + tmpDirPath + \"; echo hi > ./f1\",\n                    \"cd \" + tmpDirPath + \"; echo hi > ../\" + splitAbsPath(tmpDirPath).second + \"/f1\",\n                    // special case root dir\n                    \"cd /; echo hi > \" + tmpDirNoLeadingSlash + \"/f1\",\n                    // test also if partial hashing works for bigger files\n                    \"cat \" + r510.toStdString() + \" > \" + filepath,\n                    \"cat \" + r4096.toStdString() + \" > \" + filepath,\n                    \"cat \" + r21567.toStdString() + \" > \" + filepath,\n                    \"cat \" + r101978.toStdString() + \" > \" + filepath,\n        };\n\n        const auto setupCmd = AutoTest::globals().integrationSetupCommand;\n\n        for(const auto& cmd : cmds){\n            testhelper::deleteDatabaseDir();\n            cmdWrittenFileCheck(cmd, filepath, setupCmd);\n        }\n    }\n\n\n    void testRead(){\n        const auto setupCmd = AutoTest::globals().integrationSetupCommand;\n\n        auto pTmpDir = testhelper::mkAutoDelTmpDir();\n        // for read events only include our tempdir\n        qsimplecfg::Cfg cfg;\n        writeReadSettingsToCfg(pTmpDir->path(), cfg);\n        auto & sets = Settings::instance();\n        SafeFileUpdate cfgUpd8(sets.cfgFilepath());\n        cfgUpd8.write([&cfg, &cfgUpd8]{\n           cfg.store(cfgUpd8.file());\n        });\n\n\n        const QString fname = \"foo1\";\n        const QString fullPath = pTmpDir->path() + '/' + fname;\n\n        QFileThrow(fullPath).open(QFile::WriteOnly);\n        QString cmdTxt = \"cat \" + fullPath;\n        executeCmdInbservedShell(cmdTxt, setupCmd);\n\n        SqlQuery 
query;\n        const auto & cols = QueryColumns::instance();\n\n        query.addWithAnd(cols.rFile_path, pTmpDir->path());\n        query.addWithAnd(cols.rFile_name, fname);\n\n        auto cmdIter = db_controller::queryForCmd(query);\n        auto dbCleanup = finally([] { db_connection::close(); });\n        QVERIFY(cmdIter->next());\n        auto cmdInfo = cmdIter->value();\n        QCOMPARE(cmdInfo.text, cmdTxt);\n        QCOMPARE(cmdInfo.fileReadInfos.size(), 1);\n        auto fReadInfo = cmdInfo.fileReadInfos.first();\n        QCOMPARE(fReadInfo.name, fname);\n        QCOMPARE(fReadInfo.path, pTmpDir->path());\n        QCOMPARE(fReadInfo.isStoredToDisk, false);\n\n        QVERIFY(! cmdIter->next());\n        cmdIter.reset();\n\n        // Test exec (collected as read files). Copy system echo-binary to out tmppath\n        auto echoPath = QStandardPaths::findExecutable(\"echo\").toLocal8Bit();\n        QVERIFY(! echoPath.isEmpty());\n        auto echoInTmp = pathJoinFilename(pTmpDir->path().toLocal8Bit(),\n                                          QByteArray(\"echo\"));\n        os::sendfile(echoInTmp, echoPath, os::stat(echoPath).st_size);\n        os::chmod(echoInTmp, 0755);\n\n        // Check if called binaries are tracked when directly called or indirectly, via\n        // env.\n        for(QString cmdTxt : QStringList{echoInTmp + \" exec_trace_test\",\n                                         \"env \" + echoInTmp + \" exec_trace_test\"}){\n            executeCmdInbservedShell(cmdTxt, setupCmd);\n            query.clear();\n            query.addWithAnd(cols.cmd_txt, cmdTxt);\n\n            // Have to explicitly free cmdIter at loop-end to avoid database locks.\n            // On first loop, the transaction of cmdIter may still be active, so\n            // the database would be locked, when we execute the next command in\n            // the shell (!).\n            auto cmdIter = db_controller::queryForCmd(query);\n            QVERIFY(cmdIter->next());\n  
          cmdInfo = cmdIter->value();\n            QCOMPARE(cmdInfo.text, cmdTxt);\n            QCOMPARE(cmdInfo.fileReadInfos.size(), 1);\n            fReadInfo = cmdInfo.fileReadInfos.first();\n            QCOMPARE(fReadInfo.name, QString(\"echo\"));\n            QCOMPARE(fReadInfo.path, pTmpDir->path());\n        }\n    }\n\n\n    void testReadScript(){\n        const auto setupCmd = AutoTest::globals().integrationSetupCommand;\n\n        auto pTmpDir = testhelper::mkAutoDelTmpDir();\n        // for read events only include our tempdir\n        qsimplecfg::Cfg cfg;\n        writeScriptSettingToCfg(pTmpDir->path(), {\"sh\"}, cfg);\n        writeReadSettingsToCfg(pTmpDir->path(), cfg);\n        auto & sets = Settings::instance();\n        SafeFileUpdate cfgUpd8(sets.cfgFilepath());\n        cfgUpd8.write([&cfg, &cfgUpd8]{\n           cfg.store(cfgUpd8.file());\n        });\n\n        const QString fname = \"foo1.sh\";\n        const QString fullPath = pTmpDir->path() + '/' + fname;\n        const QString content(\"abcdefg\");\n        testhelper::writeStringToFile(fullPath, content);\n\n        const QString cmdTxt = \"cat \" + fullPath;\n        executeCmdInbservedShell(cmdTxt, setupCmd);\n\n        SqlQuery query;\n        const auto & cols = QueryColumns::instance();\n\n        query.addWithAnd(cols.rFile_path, pTmpDir->path());\n        query.addWithAnd(cols.rFile_name, fname);\n\n        auto cmdIter = db_controller::queryForCmd(query);\n        auto dbCleanup = finally([] { db_connection::close(); });\n        QVERIFY(cmdIter->next());\n        auto cmdInfo = cmdIter->value();\n        QCOMPARE(cmdInfo.text, cmdTxt);\n        QCOMPARE(cmdInfo.fileReadInfos.size(), 1);\n        const auto& fReadInfo = cmdInfo.fileReadInfos.first();\n        QCOMPARE(fReadInfo.name, fname);\n        QCOMPARE(fReadInfo.path, pTmpDir->path());\n        QCOMPARE(fReadInfo.isStoredToDisk, true);\n        StoredFiles storedFiles;\n        const QString pathToFileInDb = 
StoredFiles::getReadFilesDir() + \"/\" +\n                QString::number(fReadInfo.idInDb);\n        QFileThrow fInDb(pathToFileInDb);\n        QVERIFY(fInDb.exists());\n        fInDb.open(QFile::ReadOnly);\n        QCOMPARE(content, testhelper::readStringFromFile(fullPath));\n\n        QVERIFY(! cmdIter->next());\n    }\n\n\n\n\n\n};\n\nDECLARE_TEST(IntegrationTestShell)\n\n#include \"integration_test_shell.moc\"\n"
  },
  {
    "path": "test/main.cpp",
    "content": "\n#include \"autotest.h\"\n\nTEST_MAIN\n"
  },
  {
    "path": "test/sqlite_sample_db_v2_2/readFiles/3",
    "content": "s1\n"
  },
  {
    "path": "test/sqlite_sample_db_v2_2/readFiles/4",
    "content": "s2\n"
  },
  {
    "path": "test/test_cfg.cpp",
    "content": "\n#include <memory>\n#include <QTest>\n#include <QTemporaryFile>\n#include <QTextStream>\n#include <QDebug>\n\n#include \"cfg.h\"\n\n#include \"autotest.h\"\n\nusing qsimplecfg::Cfg;\n\n\nclass CfgTest : public QObject {\n    Q_OBJECT\n\n    const QString CONFIG_TXT = R\"SOMERANDOMTEXT(\n# Initial\n# comment\n\n[section1]\n# section1\n# comment\nsection1_key1=val1\nsection1_key2 =  val2\n\n[section2]\n# section2\n# comment\nsection2_key1 = '''first\n    second\n    third'''\nsection2_key2='''\none\n    two\n\nthree\n'''\n\n[section3]\nsection3_key1 = '''\n'''\nsection3_key2 = '''\nfoo\n'''\n\n)SOMERANDOMTEXT\";\n\n    void verifyStdCfg(Cfg& cfg){\n\n        QVERIFY(cfg.m_parsedNameSectionHash.find(\"section1\") != cfg.m_parsedNameSectionHash.end());\n        auto sect1 = cfg[\"section1\"];\n        sect1->setComments(\"section1\\ncomment\");\n        QCOMPARE(sect1->getValue<QString>(\"section1_key1\"), QString(\"val1\") );\n        QCOMPARE(sect1->getValue<QString>(\"section1_key2\"), QString(\"val2\") );\n\n        QVERIFY(cfg.m_parsedNameSectionHash.find(\"section2\") != cfg.m_parsedNameSectionHash.end());\n        auto  sect2 = cfg[\"section2\"];\n        QCOMPARE(sect2->getValues<QStringList>(\"section2_key1\", {},false, \"\\n\"),\n                                            (QStringList{\"first\", \"second\", \"third\"}) );\n        QCOMPARE(sect2->getValues<QStringList>(\"section2_key2\", {},false, \"\\n\"),\n                 (QStringList{\"one\", \"two\", \"three\"}) );\n\n        QVERIFY(cfg.m_parsedNameSectionHash.find(\"section3\") != cfg.m_parsedNameSectionHash.end());\n        auto  sect3 = cfg[\"section3\"];\n        QCOMPARE(sect3->getValues<QStringList>(\"section3_key1\", {}, false, \"\\n\"), QStringList());\n        QCOMPARE(sect3->getValues<QStringList>(\"section3_key2\", {}, false, \"\\n\"),\n                 (QStringList{\"foo\"}) );\n\n        QVERIFY(cfg.generateNonReadSectionKeyPairs().isEmpty());\n    }\n\n    
std::unique_ptr<QTemporaryFile> writeToTmpConfigFile(const QString& txt){\n        auto file = std::unique_ptr<QTemporaryFile>(new QTemporaryFile);\n        if(! file->open()){\n            return file;\n        }\n        QTextStream stream(file.get());\n        stream << txt;\n        file->close();\n        return file;\n    }\n\nprivate slots:\n    void initTestCase(){\n        logger::setup(__FILE__);\n    }\n\n    void tgeneral() {\n        auto file = writeToTmpConfigFile(CONFIG_TXT);\n        QVERIFY(! file->fileName().isEmpty());\n\n        Cfg cfg;\n        // parse, verify, store and verify again\n        cfg.parse(file->fileName());\n        verifyStdCfg(cfg);\n        cfg.store(file->fileName());\n\n        cfg.parse(file->fileName());\n        verifyStdCfg(cfg);\n    }\n\n    void tRenameSection(){\n        auto file = writeToTmpConfigFile(CONFIG_TXT);\n        QVERIFY(! file->fileName().isEmpty());\n\n        Cfg cfg;\n        cfg.parse(file->fileName());\n        QVERIFY(cfg.renameParsedSection(\"section1\", \"section1_renamed\"));\n\n        auto sect1 = cfg[\"section1_renamed\"];\n        QCOMPARE(sect1->getValue<QString>(\"section1_key1\"), QString(\"val1\") );\n        QCOMPARE(sect1->getValue<QString>(\"section1_key2\"), QString(\"val2\") );\n\n        cfg.store(file->fileName());\n\n        // now that it's renamed, skip the renaming and check if values still correct\n        cfg.parse(file->fileName());\n\n        sect1 = cfg[\"section1_renamed\"];\n        QCOMPARE(sect1->getValue<QString>(\"section1_key1\"), QString(\"val1\") );\n        QCOMPARE(sect1->getValue<QString>(\"section1_key2\"), QString(\"val2\") );\n    }\n\n    void tRenameKey(){\n        auto file = writeToTmpConfigFile(CONFIG_TXT);\n        QVERIFY(! 
file->fileName().isEmpty());\n\n        Cfg cfg;\n        cfg.parse(file->fileName());\n        auto sect = cfg.getParsedSectionIfExist(\"section1\");\n        QVERIFY(sect != nullptr);\n        QVERIFY(sect->renameParsedKey(\"section1_key1\", \"section1_key1_renamed\"));\n\n        // assign it again -> only sections accessed via opertor[] are stored\n        // to disk later...\n        sect = cfg[\"section1\"];\n        QCOMPARE(sect->getValue<QString>(\"section1_key1_renamed\"), QString(\"val1\") );\n        QCOMPARE(sect->getValue<QString>(\"section1_key2\"), QString(\"val2\") );\n\n        cfg.store(file->fileName());\n\n        cfg.parse(file->fileName());\n        sect = cfg[\"section1\"];\n        QCOMPARE(sect->getValue<QString>(\"section1_key1_renamed\"), QString(\"val1\") );\n        QCOMPARE(sect->getValue<QString>(\"section1_key2\"), QString(\"val2\") );\n\n        // now that it's renamed, skip the renaming and check if values still correct\n    }\n\n};\n\nDECLARE_TEST(CfgTest)\n\n#include \"test_cfg.moc\"\n"
  },
  {
    "path": "test/test_cxxhash.cpp",
    "content": "\n#include <QTest>\n#include <QDebug>\n#include <QTemporaryFile>\n\n#include \"autotest.h\"\n#include \"cxxhash.h\"\n\n\nclass CXXHashTest : public QObject {\n    Q_OBJECT\n\n\n    bool test_hash(int fd, const std::string& str, int chunksize,\n                   int seekstep, int maxCountOfReads, int bufsize,\n                   CXXHash& h, std::string expected=\"\"){\n        ftruncate(fd, 0);\n        lseek(fd, 0, SEEK_SET);\n        os::write(fd, str);\n        lseek(fd, 0, SEEK_SET);\n\n        h.resizeBuf(bufsize);\n        struct partial_xxhash_result res = h.digestFile(fd, chunksize, seekstep, maxCountOfReads);\n\n        if(expected.empty()){\n            // delete all underscores (jumped over during partial hash)\n            expected = str;\n            expected.erase(std::remove(expected.begin(), expected.end(), '_'), expected.end());\n        }\n        return res.hash == XXH64(expected.c_str(), expected.size(), 0 );\n    }\n\n\nprivate slots:\n    void initTestCase(){\n        logger::setup(__FILE__);\n    }\n\n\n    void testDigestFile() {\n        QTemporaryFile tmpFile;\n        tmpFile.open();\n        int fd = tmpFile.handle();\n        CXXHash h;\n        const int ignoreMaxReads = std::numeric_limits<int>::max();\n\n       // result should be independent from buffer size\n       for(int bufSize=1; bufSize < 128; bufSize++){\n           QVERIFY(test_hash(fd, \"aa__bb__cc__dd\", 2, 4, ignoreMaxReads, bufSize, h));\n           // change maxCountOfReads\n           QVERIFY(test_hash(fd, \"aa__bb__cc__dd\", 2, 4, 2, bufSize, h, \"aabb\"));\n           // effectively digest everything\n           QVERIFY(test_hash(fd, \"aa__bb__cc__dd\", 2, 2, ignoreMaxReads, bufSize, h,\n                             \"aa__bb__cc__dd\"));\n           // corner case: seekstep == chunksize + 1\n           QVERIFY(test_hash(fd, \"aa__bb__cc__dd\", 2, 3, ignoreMaxReads, bufSize, h,\n                             \"aa_b__c_dd\"));\n           // uneven 
character length should also work\n           QVERIFY(test_hash(fd, \"aa__bb__cc__d\", 2, 4, ignoreMaxReads, bufSize, h));\n           // larger chunks\n           QVERIFY(test_hash(fd, \"aaaa__bbbb__cccc__dddd\", 4, 6, ignoreMaxReads, bufSize, h));\n       }\n    }\n\n\n\n};\n\n\nDECLARE_TEST(CXXHashTest)\n\n#include \"test_cxxhash.moc\"\n"
  },
  {
    "path": "test/test_db_controller.cpp",
    "content": "\n#include <QTest>\n#include <QTemporaryFile>\n#include <cassert>\n#include <fcntl.h>\n\n#include \"autotest.h\"\n#include \"compat.h\"\n#include \"osutil.h\"\n#include \"helper_for_test.h\"\n#include \"util.h\"\n#include \"database/fileinfos.h\"\n#include \"fileevents.h\"\n\n#include \"database/db_controller.h\"\n#include \"database/db_connection.h\"\n#include \"database/query_columns.h\"\n#include \"database/db_conversions.h\"\n#include \"database/storedfiles.h\"\n#include \"cleanupresource.h\"\n#include \"settings.h\"\n#include \"qfilethrow.h\"\n#include \"stdiocpp.h\"\n\n\n\n\nusing db_controller::QueryColumns;\nusing db_controller::queryForCmd;\n\ntemplate <class ContainerT>\ntypename ContainerT::value_type* __fInfoById(ContainerT& infos, qint64 id_){\n    for(auto& f : infos){\n        if(f.idInDb == id_) return &f;\n    }\n    return nullptr;\n}\n\n\n/// Stored read files may be moved\n/// from cache dir to shournal's db,\n/// to simplify comparison here, store\n/// the content in 'bytes'.\nclass FileReadEventForTest {\npublic:\n    FileReadEventForTest(const QByteArray& bytes) :\n        m_bytes(bytes)\n    {\n        m_file.open();\n        m_file.write(bytes);\n        m_file.seek(0);\n    }\n\n    FileEvent e{};\n\n    const QTemporaryFile& file(){ return m_file; }\n    const QByteArray& bytes(){ return m_bytes; }\n\nprivate:\n    QTemporaryFile m_file;\n    QByteArray m_bytes;\n};\n\ntypedef std::unique_ptr<FileReadEventForTest> FileReadEventForTest_ptr;\n\n\nCommandInfo generateCmdInfo(){\n    static int id_ = 1;\n    CommandInfo cmd;\n    cmd.text = QByteArray::number(id_);\n    cmd.hashMeta.chunkSize = 2048;\n    cmd.hashMeta.maxCountOfReads = 20;\n    cmd.hostname = \"myhost\";\n    cmd.username = \"myuser\";\n    cmd.returnVal = 42;\n    cmd.startTime = Qt::datetimeFromDate(QDate(2019,1, id_ % 28));\n    cmd.endTime = Qt::datetimeFromDate(QDate(2019,1, id_ % 28));\n    cmd.workingDirectory = \"/home/user\";\n\n    id_++;\n    return 
cmd;\n}\n\nvoid push_back_writeEvent(FileEvents& fEvents, const FileEvent& e){\n    struct stat st{};\n    st.st_mtime = e.mtime();\n    st.st_size = e.size();\n    st.st_mode = mode_t(e.mode());\n    fEvents.write(e.flags(), e.path(), st, e.hash());\n}\n\nvoid push_back_readEvent(FileEvents& fEvents, const FileReadEventForTest_ptr& e){\n    struct stat st = os::fstat(e->file().handle());\n    fEvents.write(e->e.flags(), e->e.path(), st, e->e.hash(), e->file().handle());\n}\n\n\n// sort them by filesize\nvoid sortFileWriteInfos(FileWriteInfos & fInos){\n    std::sort(fInos.begin(), fInos.end(), [](const FileWriteInfo& f1, const FileWriteInfo& f2){\n        return f1.size < f2.size;\n    });\n}\n\n// sort them by filesize\nvoid sortFileReadInfos(FileReadInfos & fInos){\n    std::sort(fInos.begin(), fInos.end(), [](const FileReadInfo& f1, const FileReadInfo& f2){\n        return f1.size < f2.size;\n    });\n}\n\nint countStoredFiles(){\n    return QDir(StoredFiles::getReadFilesDir()).entryList(QDir::Filter::NoDotDot | QDir::Files).size();\n}\n\nint deleteCommandInDb(qint64 id)\n{\n   SqlQuery q;\n   q.addWithAnd(QueryColumns::instance().cmd_id, id, E_CompareOperator::EQ);\n   return db_controller::deleteCommand(q);\n}\n\nvoid db_addFileEventsWrapper(const CommandInfo &cmd, FileEvents &fileEvents){\n    fseek(fileEvents.file(), 0, SEEK_SET);\n    db_controller::addFileEvents(cmd, fileEvents);\n}\n\nclass DbCtrlTest : public QObject {\n    Q_OBJECT\n\n    FileWriteInfo fileWriteEventToWriteInfo(const FileEvent& e){\n        FileWriteInfo i;\n        assert(! 
e.m_close_event.hash_is_null);\n        i.hash = e.hash();\n\n        auto splittedPah = splitAbsPath(QString(e.path()));\n        i.path = splittedPah.first;\n        i.name = splittedPah.second;\n        i.size = e.size();\n        i.mtime = db_conversions::fromMtime(e.mtime()).toDateTime();\n        return i;\n    }\n\n    FileReadInfo fileReadEventToReadInfo(const FileReadEventForTest_ptr& e){\n        FileReadInfo i;\n        i.mode = mode_t(e->e.mode());\n\n        auto splittedPah = splitAbsPath(QString(e->e.path()));\n        i.path = splittedPah.first;\n        i.name = splittedPah.second;\n        i.size = e->e.size();\n        i.mtime = db_conversions::fromMtime(e->e.mtime()).toDateTime();\n        i.hash = e->e.hash();\n        return i;\n    }\n\n\n\n    FileEvent generateFileWriteEvent(){\n        static auto hash_ = std::numeric_limits<uint64_t>::max();\n        static int id_ = 1;\n\n        FileEvent e{};\n        e.m_close_event.flags = O_WRONLY;\n        e.m_close_event.mtime = Qt::datetimeFromDate(QDate(2019,1, id_ % 28)).toTime_t();\n        e.m_close_event.size = id_;\n        e.m_close_event.mode = 0;\n        e.m_close_event.hash = hash_;\n        e.m_close_event.hash_is_null = false;\n        e.m_close_event.bytes = 0;\n\n        std::string fullpath = \"/tmp/\" + std::to_string(id_) +  \".txt\";\n        e.setPath(fullpath.c_str());\n        hash_--;\n        id_++;\n\n        return  e;\n    }\n\n\n    FileReadEventForTest_ptr\n    generateFileReadEvent(){\n        static auto hash_ = std::numeric_limits<uint64_t>::max();\n        static int id_ = 1;\n        QByteArray fileContent(QByteArray::number(id_), id_);\n        auto e = FileReadEventForTest_ptr(new FileReadEventForTest(fileContent));\n        auto st = os::fstat(e->file().handle());\n\n        e->e.m_close_event.flags = O_RDONLY;\n        e->e.m_close_event.mtime = st.st_mtime;\n        e->e.m_close_event.size = st.st_size;\n        e->e.m_close_event.mode = st.st_mode;\n        
e->e.m_close_event.hash = hash_;\n        e->e.m_close_event.hash_is_null = false;\n        e->e.m_close_event.bytes = st.st_size;\n\n        std::string fullpath = \"/tmp/\" + std::to_string(id_) +  \".txt\";\n        e->e.setPath(fullpath.c_str());\n\n        --hash_;\n        ++id_;\n        return e;\n    }\n\n\nprivate slots:\n    void initTestCase(){\n        logger::setup(__FILE__);\n    }\n\n    void init(){\n        testhelper::setupPaths();\n    }\n\n    void cleanup(){\n        testhelper::deletePaths();\n    }\n\n\n    void tWriteOnly() {\n        FILE* tmpFile = stdiocpp::tmpfile();\n        auto closeTmpFile = finally([&tmpFile] {\n            fclose(tmpFile);\n        });\n        FileEvents fileEvents;\n        fileEvents.setFile(tmpFile);\n\n\n        auto fInfo1 = generateFileWriteEvent();\n        push_back_writeEvent(fileEvents, fInfo1);\n\n        auto fInfo2 = generateFileWriteEvent();\n        push_back_writeEvent(fileEvents, fInfo2);\n\n        CommandInfo cmd1 = generateCmdInfo();\n        cmd1.idInDb = db_controller::addCommand(cmd1);\n        auto closeDb = finally([] {\n            db_connection::close();\n        });\n\n        db_addFileEventsWrapper(cmd1, fileEvents);\n\n        QueryColumns & queryCols = QueryColumns::instance();\n        SqlQuery q1;\n        q1.addWithAnd(queryCols.wFile_size, int(fInfo1.size()) );\n\n        auto cmd1Back = queryForCmd(q1);\n        QVERIFY(cmd1Back->next());\n        cmd1.fileWriteInfos = { fileWriteEventToWriteInfo(fInfo1),\n                                fileWriteEventToWriteInfo(fInfo2) };\n        sortFileWriteInfos(cmd1Back->value().fileWriteInfos);\n        QCOMPARE(cmd1Back->value(), cmd1);\n        q1.clear();\n        q1.addWithAnd(queryCols.wFile_hash, qBytesFromVar(fInfo1.hash().value()) );\n        cmd1Back.reset();\n        cmd1Back = queryForCmd(q1);\n        QVERIFY(cmd1Back->next());\n        sortFileWriteInfos(cmd1Back->value().fileWriteInfos);\n        
QCOMPARE(cmd1Back->value(), cmd1);\n\n        // TODO: test with a hash of null\n\n    }\n\n\n    void tRead(){\n        FILE* tmpFile = stdiocpp::tmpfile();\n        auto closeTmpFile = finally([&tmpFile] {\n            fclose(tmpFile);\n        });\n        FileEvents fileEvents;\n        fileEvents.setFile(tmpFile);\n\n        auto readEvent1 = generateFileReadEvent();\n        push_back_readEvent(fileEvents, readEvent1);\n        auto readEvent2 = generateFileReadEvent();\n        push_back_readEvent(fileEvents, readEvent2);\n\n        CommandInfo cmd1 = generateCmdInfo();\n        cmd1.idInDb = db_controller::addCommand(cmd1);\n        auto closeDb = finally([] {\n            db_connection::close();\n        });\n        fflush(tmpFile);\n        db_addFileEventsWrapper(cmd1, fileEvents);\n\n        cmd1.fileReadInfos = {fileReadEventToReadInfo(readEvent1), fileReadEventToReadInfo(readEvent2)};\n\n        QueryColumns & queryCols = QueryColumns::instance();\n        SqlQuery q1;\n        q1.addWithAnd(queryCols.rFile_size, int(readEvent1->e.size()) );\n\n        auto cmd1Back = queryForCmd(q1);\n        QVERIFY(cmd1Back->next());\n        sortFileReadInfos(cmd1Back->value().fileReadInfos);\n        QCOMPARE(cmd1Back->value(), cmd1);\n\n        q1.clear();\n\n        q1.addWithAnd(queryCols.rFile_size, int(readEvent2->e.size()) );\n        cmd1Back.reset();\n        cmd1Back = queryForCmd(q1);\n        QVERIFY(cmd1Back->next());\n        sortFileReadInfos(cmd1Back->value().fileReadInfos);\n        QCOMPARE(cmd1Back->value(), cmd1);\n\n        // TODO: also check bytes of the file?!\n    }\n\n    void tDuplicates(){\n        FILE* tmpFile = stdiocpp::tmpfile();\n        auto closeTmpFile = finally([&tmpFile] {\n            fclose(tmpFile);\n        });\n        FileEvents fileEvents;\n        fileEvents.setFile(tmpFile);\n\n\n        auto wInfo1 = generateFileWriteEvent();\n        push_back_writeEvent(fileEvents, wInfo1);\n        
push_back_writeEvent(fileEvents, wInfo1);\n        push_back_writeEvent(fileEvents, wInfo1);\n\n        auto rInfo1 = generateFileReadEvent();\n        push_back_readEvent(fileEvents, rInfo1);\n        push_back_readEvent(fileEvents, rInfo1);\n        push_back_readEvent(fileEvents, rInfo1);\n\n\n        CommandInfo cmd1 = generateCmdInfo();\n        cmd1.idInDb = db_controller::addCommand(cmd1);\n        auto closeDb = finally([] {\n            db_connection::close();\n        });\n\n        db_addFileEventsWrapper(cmd1, fileEvents);\n\n\n        QueryColumns & queryCols = QueryColumns::instance();\n        SqlQuery q1;\n        q1.addWithAnd(queryCols.cmd_id, int(cmd1.idInDb) );\n\n        auto cmd1Back = queryForCmd(q1);\n        QVERIFY(cmd1Back->next());\n        cmd1.fileWriteInfos = { fileWriteEventToWriteInfo(wInfo1)};\n        cmd1.fileReadInfos = { fileReadEventToReadInfo(rInfo1)};\n\n        QCOMPARE(cmd1Back->value(), cmd1);\n    }\n\n\n    void tDeleteCommand(){\n        FILE* tmpFile = stdiocpp::tmpfile();\n        auto closeTmpFile = finally([&tmpFile] {\n            fclose(tmpFile);\n        });\n        FileEvents fileEvents;\n        fileEvents.setFile(tmpFile);\n\n        auto readEvent1 = generateFileReadEvent();\n        push_back_readEvent(fileEvents, readEvent1);\n        auto readEvent2 = generateFileReadEvent();\n        push_back_readEvent(fileEvents, readEvent2);\n\n        auto writeEvent1 = generateFileWriteEvent();\n        push_back_writeEvent(fileEvents, writeEvent1);\n\n        auto writeEvent2 = generateFileWriteEvent();\n        push_back_writeEvent(fileEvents, writeEvent2);\n\n        CommandInfo cmd1 = generateCmdInfo();\n        cmd1.idInDb = db_controller::addCommand(cmd1);\n        auto closeDb = finally([] { db_connection::close(); });\n        db_addFileEventsWrapper(cmd1, fileEvents );\n\n        cmd1.fileReadInfos = {fileReadEventToReadInfo(readEvent1),\n                              
fileReadEventToReadInfo(readEvent2)};\n        cmd1.fileWriteInfos = { fileWriteEventToWriteInfo(writeEvent1),\n                                fileWriteEventToWriteInfo(writeEvent2) };\n\n        QCOMPARE(deleteCommandInDb(cmd1.idInDb), 1);\n\n        QueryColumns & queryCols = QueryColumns::instance();\n        SqlQuery q1;\n        // should return all commands\n        q1.addWithAnd(queryCols.rFile_size, 0, E_CompareOperator::GE );\n\n        auto cmd1Back = queryForCmd(q1);\n\n        QVERIFY(! cmd1Back->next());\n\n        auto query = db_connection::mkQuery();\n        query->exec(\"select * from writtenFile\");\n        QVERIFY(! query->next());\n\n        query->exec(\"select * from readFile\");\n        QVERIFY(! query->next());\n\n        query->exec(\"select * from readFileCmd\");\n        QVERIFY(! query->next());\n\n        QCOMPARE(countStoredFiles(), 0);\n\n        // a single command seems to work\n        // Check for two commands, where one read file is unique for each command\n        // while the other is common to both.\n        auto cmd2 = generateCmdInfo();\n        auto readEvent3 = generateFileReadEvent();\n        cmd2.fileReadInfos = {fileReadEventToReadInfo(readEvent1),fileReadEventToReadInfo(readEvent3)};\n\n        cmd1.idInDb = db_controller::addCommand(cmd1);\n\n        stdiocpp::ftruncate_unlocked(fileEvents.file());\n\n        push_back_readEvent(fileEvents, readEvent1);\n        push_back_readEvent(fileEvents, readEvent2);\n\n        db_addFileEventsWrapper(cmd1, fileEvents );\n\n        cmd2.idInDb = db_controller::addCommand(cmd2);\n\n        stdiocpp::ftruncate_unlocked(fileEvents.file());\n\n        push_back_readEvent(fileEvents, readEvent1);\n        push_back_readEvent(fileEvents, readEvent3);\n\n        db_addFileEventsWrapper(cmd2, fileEvents );\n\n        QCOMPARE(deleteCommandInDb(cmd1.idInDb), 1);\n        // readEvent1 is common to both and should remain, readEvent2 should be deleted,\n        // readEvent3 should 
still be there\n        const char* qReadFileSize = \"select * from readFile where size=?\";\n\n        query->prepare(qReadFileSize);\n        query->addBindValue(qint64(readEvent1->e.size()));\n        query->exec();\n        QVERIFY(query->next());\n\n        query->prepare(qReadFileSize);\n        query->addBindValue(qint64(readEvent2->e.size()));\n        query->exec();\n        QVERIFY(! query->next());\n\n        query->prepare(qReadFileSize);\n        query->addBindValue(qint64(readEvent3->e.size()));\n        query->exec();\n        QVERIFY(query->next());\n\n        QCOMPARE(countStoredFiles(), 2);\n\n        QCOMPARE(deleteCommandInDb(cmd2.idInDb), 1);\n        query->exec(\"select * from writtenFile\");\n        QVERIFY(! query->next());\n\n        query->exec(\"select * from readFile\");\n        QVERIFY(! query->next());\n\n        query->exec(\"select * from readFileCmd\");\n        QVERIFY(! query->next());\n\n        query->exec(\"select * from hashmeta\");\n        QVERIFY(! query->next());\n\n        query->exec(\"select * from pathtable\");\n        QVERIFY(! query->next());\n\n        QCOMPARE(countStoredFiles(), 0);\n    }\n\n    void tSchemeUpdates(){\n        const QString & dbDir = db_connection::getDatabaseDir();\n        os::rmdir(dbDir.toUtf8());\n\n        // Copy a database with sample data to the test-database-dir\n        // and check, whether the data survives the scheme update(s), which\n        // are automatically performed upon the first database-usage.\n        // Until including v2.2 nothing testworthy happened\n        // -> src-path for database defined in cmake.\n        QString testDbPath;\n        if (QDir(SHOURNALTEST_SQLITE_v_2_2).exists()){\n            testDbPath = SHOURNALTEST_SQLITE_v_2_2;\n        } else {\n            // also consider current directory to allow for easy testing\n            // on another machine.\n            testDbPath = splitAbsPath<QString>(SHOURNALTEST_SQLITE_v_2_2).second;\n            if (! 
QDir(testDbPath).exists()){\n                QIErr() << QString(\"dir of testdatabase not found: %1\").arg(testDbPath);\n                QVERIFY(false);\n            }\n        }\n        QVERIFY(testhelper::copyRecursively(testDbPath, dbDir));\n\n        QueryColumns & queryCols = QueryColumns::instance();\n        SqlQuery q;\n        q.addWithAnd(queryCols.wFile_id, 1);\n\n        auto cmd = queryForCmd(q);\n        QCOMPARE(cmd->computeSize(), 1);\n        QVERIFY(cmd->next());\n        QCOMPARE(cmd->value().fileWriteInfos.size(),2);\n        auto fw = __fInfoById(cmd->value().fileWriteInfos, 1);\n        QVERIFY(fw);\n        QVERIFY(fw->name == \"one\");\n        QVERIFY(fw->path == \"/home/tycho\");\n\n        fw = __fInfoById(cmd->value().fileWriteInfos, 2);\n        QVERIFY(fw);\n        QVERIFY(fw->name == \"two\");\n        QVERIFY(fw->path == \"/home/tycho\");\n\n\n        // ---------\n        q.clear();\n        q.addWithAnd(queryCols.cmd_id, 2);\n        cmd.reset();\n        cmd = queryForCmd(q);\n        QVERIFY(cmd->next());\n        QCOMPARE(cmd->value().fileReadInfos.size(),2);\n        auto fr = __fInfoById(cmd->value().fileReadInfos, 1);\n        QVERIFY(fr);\n        QVERIFY(fr->name == \"one\");\n        QVERIFY(fr->path == \"/home/tycho\");\n        QVERIFY(!fr->isStoredToDisk);\n\n        fr = __fInfoById(cmd->value().fileReadInfos, 2);\n        QVERIFY(fr);\n        QVERIFY(fr->name == \"two\");\n        QVERIFY(fr->path == \"/home/tycho\");\n        QVERIFY(!fr->isStoredToDisk);\n\n        // ---------\n\n        q.clear();\n        q.addWithAnd(queryCols.cmd_id, 3);\n        cmd.reset();\n        cmd = queryForCmd(q);\n        QVERIFY(cmd->next());\n        QCOMPARE(cmd->value().fileWriteInfos.size(),2);\n        fw = __fInfoById(cmd->value().fileWriteInfos, 3);\n        QVERIFY(fw);\n        QVERIFY(fw->name == \"three\");\n        QVERIFY(fw->path == \"/tmp\");\n\n        fw = __fInfoById(cmd->value().fileWriteInfos, 4);\n        
QVERIFY(fw);\n        QVERIFY(fw->name == \"four\");\n        QVERIFY(fw->path == \"/tmp\");\n\n        // ---------\n        q.clear();\n        q.addWithAnd(queryCols.cmd_id, 4);\n        cmd.reset();\n        cmd = queryForCmd(q);\n        QVERIFY(cmd->next());\n        QCOMPARE(cmd->value().fileReadInfos.size(),2);\n        fr = __fInfoById(cmd->value().fileReadInfos, 3);\n        QVERIFY(fr);\n        QVERIFY(fr->name == \"script1.sh\");\n        QVERIFY(fr->path == \"/home/tycho/storeme\");\n        QVERIFY(fr->isStoredToDisk);\n\n        fr = __fInfoById(cmd->value().fileReadInfos, 4);\n        QVERIFY(fr);\n        QVERIFY(fr->name == \"script2.sh\");\n        QVERIFY(fr->path == \"/home/tycho/storeme\");\n        QVERIFY(fr->isStoredToDisk);\n\n    }\n\n\n};\n\n\nDECLARE_TEST(DbCtrlTest)\n\n#include \"test_db_controller.moc\"\n\n"
  },
  {
    "path": "test/test_fdcommunication.cpp",
    "content": "\n// Some older systems like CentOS7 (older glibc-versions) don't provide it yet.\n// Below workaround is ugly but works.\n#if __has_include(<linux/kcmp.h>)\n#include <linux/kcmp.h>\n#else\nenum kcmp_type { KCMP_FILE};\n#endif\n\n#include <sys/syscall.h>\n\n#include <sys/socket.h>\n#include <QTest>\n#include <QTemporaryFile>\n\n\n#include \"autotest.h\"\n#include \"fdcommunication.h\"\n#include \"os.h\"\n#include \"cleanupresource.h\"\n#include \"excos.h\"\n\nusing fdcommunication::SocketCommunication;\nusing Message = SocketCommunication::Message;\n\n\nstatic QPair<SocketCommunication, SocketCommunication> makeSockets(){\n    auto sockets = os::socketpair(PF_UNIX, SOCK_STREAM);\n    SocketCommunication sendSock;\n    sendSock.setSockFd(sockets[0]);\n\n    SocketCommunication receiveSock;\n    receiveSock.setReceiveBufferSize(1024);\n    receiveSock.setSockFd(sockets[1]);\n\n    return {sendSock, receiveSock};\n}\n\nclass FCommunicationTest : public QObject {\n    Q_OBJECT\nprivate:\n    bool fdsAreEqual(int fd1, int fd2)\n    {\n        auto pid = getpid();\n        auto ret =  syscall(SYS_kcmp, pid, pid, KCMP_FILE, fd1, fd2);\n        if(ret == -1)throw os::ExcOs(\"SYS_kcmp failed: \");\n        return ret == 0;\n    }\n\n\nprivate slots:\n    void initTestCase(){\n        logger::setup(__FILE__);\n    }\n\n    void tNormal() {\n        auto sockets = makeSockets();\n        auto sendSock = sockets.first;\n        auto receiveSock = sockets.second;\n        auto closeSocks = finally([&sendSock, &receiveSock] {\n            close(sendSock.sockFd());\n            close(receiveSock.sockFd());\n        });\n\n        Message msg1{1,  \"echo hi some_text_with_umläüts and greek \"\n                         \"δ ω π Δ σ α β γ Σ λ ε µ DONE\" };\n        sendSock.sendMsg(msg1);\n\n        Message msg2{2, \"abcdefg\"};\n        sendSock.sendMsg(msg2);\n\n        auto messages = receiveSock.receiveMessages();\n\n        QCOMPARE(2, messages.size());\n        
QCOMPARE(msg1, messages[0]);\n        QCOMPARE(msg2, messages[1]);\n\n        // send both messages aggregated.\n        sendSock.sendMessages({msg1, msg2});\n\n        QCOMPARE(2, messages.size());\n        QCOMPARE(msg1, messages[0]);\n        QCOMPARE(msg2, messages[1]);\n    }\n\n\n    void tFd() {\n        auto sockets = makeSockets();\n        auto sendSock = sockets.first;\n        auto receiveSock = sockets.second;\n        auto closeSocks = finally([&sendSock, &receiveSock] {\n            close(sendSock.sockFd());\n            close(receiveSock.sockFd());\n        });\n\n        QTemporaryFile tmpFile1;\n        QVERIFY(tmpFile1.open());\n\n        Message msg1{1, \"foobar\", tmpFile1.handle()};\n\n        sendSock.sendMsg(msg1);\n\n\n        auto messages = receiveSock.receiveMessages();\n\n        QCOMPARE(1, messages.size());\n        QCOMPARE(msg1.msgId, messages[0].msgId);\n        QCOMPARE(msg1.bytes, messages[0].bytes);\n        QVERIFY(messages[0].fd != -1);\n\n        QVERIFY(fdsAreEqual(msg1.fd, messages[0].fd));\n        os::close(messages[0].fd);\n\n\n        // check two fds in sequence.\n        // In this case they are also received in sequence.\n        receiveSock.setReceiveFdSize(10);\n        QTemporaryFile tmpFile2;\n        QVERIFY(tmpFile2.open());\n\n        Message msg2{2, \"ok_youä#ü\", tmpFile2.handle()};\n\n\n        sendSock.sendMsg(msg1);\n        sendSock.sendMsg(msg2);\n\n        messages = receiveSock.receiveMessages();\n        QCOMPARE(1, messages.size());\n        QCOMPARE(msg1.msgId, messages[0].msgId);\n        QCOMPARE(msg1.bytes, messages[0].bytes);\n        QVERIFY(messages[0].fd != -1);\n\n        QVERIFY(fdsAreEqual(msg1.fd, messages[0].fd));\n        os::close(messages[0].fd);\n\n        messages = receiveSock.receiveMessages();\n        QCOMPARE(1, messages.size());\n        QCOMPARE(msg2.msgId, messages[0].msgId);\n        QCOMPARE(msg2.bytes, messages[0].bytes);\n        QVERIFY(messages[0].fd != -1);\n\n       
 QVERIFY(fdsAreEqual(msg2.fd, messages[0].fd));\n        os::close(messages[0].fd);\n\n\n        // test two fds at once\n        sendSock.sendMessages({msg1, msg2});\n        messages = receiveSock.receiveMessages();\n        QCOMPARE(2, messages.size());\n        QCOMPARE(msg1.msgId, messages[0].msgId);\n        QCOMPARE(msg1.bytes, messages[0].bytes);\n        QVERIFY(messages[0].fd != -1);\n        QCOMPARE(msg2.msgId, messages[1].msgId);\n        QCOMPARE(msg2.bytes, messages[1].bytes);\n        QVERIFY(messages[1].fd != -1);\n\n        QVERIFY(fdsAreEqual(msg1.fd, messages[0].fd));\n        os::close(messages[0].fd);\n\n        QVERIFY(fdsAreEqual(msg2.fd, messages[1].fd));\n        os::close(messages[1].fd);\n\n\n        // test two fds with regular message (without fd) in between\n\n        Message msgReg(3, \"reg\");\n        sendSock.sendMessages({msg1, msgReg, msg2});\n\n        messages = receiveSock.receiveMessages();\n        QCOMPARE(3, messages.size());\n        QCOMPARE(msg1.msgId, messages[0].msgId);\n        QCOMPARE(msg1.bytes, messages[0].bytes);\n        QVERIFY(messages[0].fd != -1);\n\n        QVERIFY(fdsAreEqual(msg1.fd, messages[0].fd));\n        os::close(messages[0].fd);\n\n        QCOMPARE(msgReg, messages[1]);\n\n        QCOMPARE(msg2.msgId, messages[2].msgId);\n        QCOMPARE(msg2.bytes, messages[2].bytes);\n        QVERIFY(messages[2].fd != -1);\n        QVERIFY(fdsAreEqual(msg2.fd, messages[2].fd));\n        os::close(messages[2].fd);\n\n }\n\n};\n\n\nDECLARE_TEST(FCommunicationTest)\n\n#include \"test_fdcommunication.moc\"\n"
  },
  {
    "path": "test/test_fileeventhandler.cpp",
    "content": "\n#include <cstdio>\n#include <cstring>\n#include <unistd.h>\n\n#include <QTest>\n#include <QTemporaryFile>\n\n\n\n#include \"autotest.h\"\n#include \"helper_for_test.h\"\n\n#include \"util.h\"\n#include \"os.h\"\n#include \"osutil.h\"\n#include \"settings.h\"\n#include \"stdiocpp.h\"\n\n#include \"fileeventhandler.h\"\n\n\n/// Write the content of buf to fd, let\n/// the FileEventHandler process that file and\n/// compare the partial hashes.\nvoid writeCompareBuf(const std::string & buf,\n                     const std::string & hasStr,\n                     const int fd){\n    ftruncate(fd, 0);\n    write(fd, buf.c_str(), buf.size());\n    lseek(fd, 0, SEEK_SET);\n\n    FileEventHandler fEventHandler;\n    fEventHandler.handleCloseWrite(fd);\n    lseek(fd, 0, SEEK_SET);\n\n    uint64_t correctHash = XXH64(hasStr.c_str(), hasStr.size(), 0 );\n\n    stdiocpp::fseek(fEventHandler.fileEvents().file(), 0 , SEEK_SET);\n    FileEvent* e = fEventHandler.fileEvents().read();\n    QVERIFY(e != nullptr);\n\n    auto path = osutil::findPathOfFd<std::string>(fd);\n    //QIErr() << \"std::string(e.fullPath), path\" << QString(e.fullPath) << QString::fromStdString(path);\n    QCOMPARE(std::string(e->path()), path);\n    QVERIFY(! 
e->hash().isNull());\n    QCOMPARE(correctHash, e->hash().value());\n}\n\nclass FileEventHandlerTest : public QObject {\n    Q_OBJECT\nprivate slots:\n    void initTestCase(){\n        logger::setup(__FILE__);\n    }\n\n    void init(){\n        //testhelper::setupPaths();\n    }\n\n    void cleanup(){\n        // testhelper::cleanupPaths();\n    }\n\n    void tWrite() {\n        /// Primarily a test, if hashChunkSize and\n        /// hashMaxCountOfReads are handled correctly\n        // Don't use QTemporaryFile here, we need a regular\n        // file with st1.st_nlink > 0\n        char tmpFileName[] = \"fileevent_test_XXXXXX\";\n        int fd = mkstemp(tmpFileName);\n        QVERIFY(os::fstat(fd).st_nlink > 0);\n        auto rmTmpFile = finally([&tmpFileName] { remove(tmpFileName); });\n\n\n        auto & sets = Settings::instance();\n        sets.m_wSettings.includePaths->insert(\"/\");\n\n        sets.m_hashSettings.hashEnable = true;\n        sets.m_hashSettings.hashMeta = HashMeta(2, 2);\n\n        std::string buf = \"g\";\n        writeCompareBuf(buf, buf, fd);\n\n        buf = \"gh\";\n        writeCompareBuf(buf, buf, fd);\n\n        buf = \"abc\";\n        writeCompareBuf(buf, buf, fd);\n\n        buf = \"abcd\";\n        writeCompareBuf(buf, buf, fd);\n\n        // only 2 chars (hashChunkSize) each at index 0 and\n        // 10/hashMaxCountOfReads = 5 should be read and used for hash\n        writeCompareBuf(\"ab___cd___\", \"abcd\", fd);\n\n        sets.m_hashSettings.hashMeta = HashMeta(3, 2);\n        writeCompareBuf(\"abc__def___\", \"abcdef\", fd);\n\n        sets.m_hashSettings.hashMeta = HashMeta(1, 2);\n        writeCompareBuf(\"a____d_____\", \"ad\", fd);\n\n        sets.m_hashSettings.hashMeta = HashMeta(1, 3);\n        writeCompareBuf(\"a__b__c___\", \"abc\", fd);\n\n    }\n\n    void tRead(){\n        // TODO: implement a test...\n\n        auto & readSettings = Settings::instance().m_scriptSettings;\n        readSettings.enable = true;\n    
    readSettings.includePaths->insert(\"/\"); // todo: mk unique path\n        readSettings.maxFileSize = 50000;\n        readSettings.onlyWritable = true;\n        readSettings.includeExtensions = {};\n        readSettings.maxCountOfFiles = 1;\n        readSettings.flushToDiskTotalSize = 10*1000;\n    }\n\n};\n\n\n\nDECLARE_TEST(FileEventHandlerTest)\n\n#include \"test_fileeventhandler.moc\"\n\n"
  },
  {
    "path": "test/test_osutil.cpp",
    "content": "\n#include <QTest>\n#include <QDebug>\n#include <QTemporaryFile>\n\n#include \"autotest.h\"\n\n#include \"osutil.h\"\n\nusing namespace osutil;\n\nclass OsutilTest : public QObject {\n    Q_OBJECT\nprivate slots:\n    void initTestCase(){\n        logger::setup(__FILE__);\n    }\n\n    void testReadWholeFile() {\n        QTemporaryFile f;\n        QVERIFY(f.open());\n        QByteArray val(\"123456\");\n        f.write(val);\n        f.seek(0);\n        // different buffer sizes should ot change the result\n        QCOMPARE(readWholeFile(f.handle(), 6), val);\n        f.seek(0);\n        QCOMPARE(readWholeFile(f.handle(), 7), val);\n        f.seek(0);\n        QCOMPARE(readWholeFile(f.handle(), 3), val);\n        f.seek(0);\n        QCOMPARE(readWholeFile(f.handle(), 1), val);\n        f.seek(0);\n\n    }\n\n\n};\n\n\nDECLARE_TEST(OsutilTest)\n\n#include \"test_osutil.moc\"\n"
  },
  {
    "path": "test/test_pathtree.cpp",
    "content": "\n#include <QTest>\n#include <QDebug>\n#include <iostream>\n\n#include \"autotest.h\"\n\n#include \"pathtree.h\"\n#include \"util.h\"\n\n\n\nclass PathTreeTest : public QObject {\n    Q_OBJECT\n\n    void checkAllSubPathsExist(PathTree& tree,\n                               const StrLight& parentPath,\n                               std::unordered_set<StrLight> paths){\n        for(auto treeIt=tree.subpathIter(parentPath); treeIt != tree.end(); ++treeIt){\n            auto it = paths.find(*treeIt);\n            QVERIFY2(it != paths.end(), (*treeIt).c_str());\n            paths.erase(it);\n        }\n        QVERIFY(paths.empty());\n    }\n\n    void checkAllExist(PathTree& tree,\n                       std::unordered_set<StrLight> paths){\n        for(const auto & p : tree){\n            auto it = paths.find(p);\n            QVERIFY2(it != paths.end(), (p).c_str());\n            paths.erase(it);\n        }\n        auto p = (paths.empty()) ? \"\" : *paths.begin();\n        QVERIFY2(paths.empty(), p.c_str());\n    }\n\n    void erasePathTreeFromIt(PathTree& tree, PathTree::iterator it){\n        while(it != tree.end() ){\n            it = tree.erase(it);\n        }\n    }\n\n\nprivate slots:\n\n    void initTestCase(){\n        logger::setup(__FILE__);\n    }\n\n    void testContains(){\n        PathTree tree;\n        QVERIFY(! tree.contains(\"/\"));\n        tree.insert(\"/\");\n        QVERIFY(tree.contains(\"/\"));\n        tree.clear();\n\n        QVERIFY(! tree.contains(\"/\"));\n\n        tree.insert(\"/\");\n        tree.insert(\"/home/user/foo\");\n\n        QVERIFY(tree.contains(\"/\"));\n        QVERIFY(tree.contains(\"/home/user/foo\"));\n\n    }\n\n\n    void testParent() {\n        PathTree tree;\n        QVERIFY(! tree.isParentPath(\"/\"));\n        QVERIFY(! 
tree.isParentPath(\"/home\"));\n\n        tree.insert(\"/home/user\");\n\n        QVERIFY(tree.isParentPath(\"/home\"));\n        QVERIFY(tree.isParentPath(\"/\"));\n        QVERIFY(tree.isParentPath(\"/home/user\", true));\n        QVERIFY(! tree.isParentPath(\"/home/user\"));\n        QVERIFY(! tree.isParentPath(\"/home/user/foo\", true));\n        QVERIFY(! tree.isParentPath(\"/home/user/foo\", false));\n\n        // special case root\n        PathTree tree2;\n        tree2.insert(\"/\");\n        QVERIFY(! tree2.isParentPath(\"/\", false));\n        QVERIFY( tree2.isParentPath(\"/\", true));\n        QVERIFY( ! tree2.isParentPath(\"/home\"));\n\n        tree2.insert(\"/home/user\");\n        QVERIFY( tree2.isParentPath(\"/\", false));\n        QVERIFY( tree2.isParentPath(\"/\", true));\n        QVERIFY( tree2.isParentPath(\"/home\"));\n        QVERIFY(! tree2.isParentPath(\"/home/user\", false));\n        QVERIFY( tree2.isParentPath(\"/home/user\", true));\n\n\n    }\n\n    void testSub() {\n        PathTree tree;\n        QVERIFY(! tree.isSubPath(\"/\"));\n        QVERIFY(! tree.isSubPath(\"/home\"));\n\n        tree.insert(\"/home/user1\");\n        tree.insert(\"/home/user2\");\n        QVERIFY(! tree.isSubPath(\"/home\"));\n        QVERIFY(! tree.isSubPath(\"/home/user1\", false));\n        QVERIFY( tree.isSubPath(\"/home/user1\", true));\n        QVERIFY( tree.isSubPath(\"/home/user1/foo\", false));\n        QVERIFY( tree.isSubPath(\"/home/user1/foo\", true));\n        QVERIFY(! tree.isSubPath(\"/home/nouser/foo\", true));\n\n        // special case root\n        PathTree tree2;\n        tree2.insert(\"/\");\n        QVERIFY(! tree2.isSubPath(\"/\", false));\n        QVERIFY( tree2.isSubPath(\"/\", true));\n        QVERIFY(  tree2.isSubPath(\"/home\"));\n\n        tree2.insert(\"/home\");\n        QVERIFY(! 
tree2.isSubPath(\"/\", false));\n        QVERIFY( tree2.isSubPath(\"/\", true));\n        QVERIFY(  tree2.isSubPath(\"/home\"));\n        QVERIFY(  tree2.isSubPath(\"/home/foo\"));\n\n        auto tree3 = std::make_shared<PathTree>();\n        tree3->insert(\"/tmp/shournal-integration-test-AsKCoY\");\n        QVERIFY( tree3->isSubPath(\"/tmp/shournal-integration-test-AsKCoY/foo1\", false));\n\n    }\n\n    void testFindSub(){        \n        PathTree tree;\n        tree.insert(\"/\");\n        QVERIFY(tree.begin() != tree.end());\n        QVERIFY(tree.subpathIter(\"/\") == tree.end());\n\n        tree.insert(\"/home\");\n        QVERIFY(tree.subpathIter(\"/\") != tree.end());\n        QVERIFY2(*tree.subpathIter(\"/\") == \"/home\", (*tree.subpathIter(\"/\")).c_str());\n\n        tree.insert(\"/home/user\");\n        tree.insert(\"/var\");\n\n        checkAllSubPathsExist(tree, \"/\", {\"/home\", \"/home/user\", \"/var\"});\n\n        tree.clear();\n        QVERIFY(tree.begin() == tree.end());\n\n        tree.insert(\"/home/foo\");\n        tree.insert(\"/media/data/123\");\n        tree.insert(\"/media/data/456\");\n        tree.insert(\"/media/data/789\");\n\n        checkAllSubPathsExist(tree, \"/media\",\n            {\"/media/data/123\",\n             \"/media/data/456\",\n             \"/media/data/789\",\n            });\n    }\n\n    void testClear(){\n        PathTree tree;\n        tree.insert(\"/home/user\");\n        tree.clear();\n        QVERIFY(! 
tree.isSubPath(\"/home/user/foo\"));\n        tree.insert(\"/home/user\");\n        QVERIFY(tree.isSubPath(\"/home/user/foo\"));\n    }\n\n    void testIter(){\n        PathTree tree;\n        QVERIFY(tree.begin() == tree.end());\n\n        std::unordered_set<StrLight> paths {\n            \"/home/user/foodir\",\n            \"/home/user/another\",\n            \"/media/cdrom/aha\",\n            \"/media/ok/123\",\n            \"/var/log\",\n            \"/\"\n        };\n        tree.insert(paths.begin(), paths.end());\n\n        checkAllExist(tree, paths);\n\n    }\n\n    void testErase(){\n        PathTree tree;\n        const std::unordered_set<StrLight> paths {\n            \"/home/user\",\n            \"/home/user/sub1\",\n            \"/home/user/sub2/subsub1\",\n            \"/media/cdrom\",\n            \"/var\",\n            \"/\"\n        };\n\n        tree.insert(paths.begin(), paths.end());\n        auto it = tree.iter(\"/home\");\n        QVERIFY(it != tree.end());\n        erasePathTreeFromIt(tree, it);\n        checkAllExist(tree, {\"/media/cdrom\", \"/var\", \"/\"});\n\n\n        tree.insert(paths.begin(), paths.end());\n        it = tree.iter(\"/home/user\");\n        QVERIFY(it != tree.end());\n        erasePathTreeFromIt(tree, it);\n        checkAllExist(tree, {\"/media/cdrom\", \"/var\", \"/\"});\n\n        tree.insert(paths.begin(), paths.end());\n        it = tree.iter(\"/home/user/sub1\");\n        QVERIFY(it != tree.end());\n        erasePathTreeFromIt(tree, it);\n        checkAllExist(tree, {\n                          \"/home/user\",\n                          \"/home/user/sub2/subsub1\",\n                          \"/media/cdrom\",\n                          \"/var\",\n                          \"/\",\n                      });\n\n        tree.insert(paths.begin(), paths.end());\n        it = tree.iter(\"/home/user/sub2/subsub1\");\n        QVERIFY(it != tree.end());\n        erasePathTreeFromIt(tree, it);\n        checkAllExist(tree, 
{\n                          \"/home/user\",\n                          \"/home/user/sub1\",\n                          \"/media/cdrom\",\n                          \"/var\",\n                          \"/\",\n                      });\n\n        tree.insert(paths.begin(), paths.end());\n        it = tree.iter(\"/var\");\n        QVERIFY(it != tree.end());\n        erasePathTreeFromIt(tree, it);\n        checkAllExist(tree, {\n                          \"/home/user\",\n                          \"/home/user/sub1\",\n                          \"/home/user/sub2/subsub1\",\n                          \"/media/cdrom\",\n                          \"/\",\n                      });\n\n        tree.insert(paths.begin(), paths.end());\n        it = tree.iter(\"/\");\n        QVERIFY(it != tree.end());\n        erasePathTreeFromIt(tree, it);\n        checkAllExist(tree, {});\n    }\n};\n\n\nDECLARE_TEST(PathTreeTest)\n\n#include \"test_pathtree.moc\"\n\n"
  },
  {
    "path": "test/test_qformattedstream.cpp",
    "content": "\n#include \"qoutstream.h\"\n#include \"qformattedstream.h\"\n\n#include \"autotest.h\"\n\n\nclass QFormattedtStreamTest : public QObject {\n    Q_OBJECT\nprivate slots:\n    void initTestCase(){\n        logger::setup(__FILE__);\n    }\n\n    void testIt() {\n        QString str;\n        QFormattedStream s(&str);\n        s.setLineStart(\"# \");\n        s.setMaxLineWidth(5);\n\n        s << \"aha next\\ntext\\nFoo\\nAVeryLongWord\";\n        s << \"ok\\n\";\n        s << \"na\";\n\n        QCOMPARE(str, QString(\"# aha\\n# nex\\n# t\\n# tex\\n# t\\n# Foo\\n# AVe\\n# ryL\\n# ong\\n\"\n                         \"# Wor\\n# d \\n# ok\\n# na \"));\n    }\n\n\n};\n\nDECLARE_TEST(QFormattedtStreamTest)\n\n#include \"test_qformattedstream.moc\"\n"
  },
  {
    "path": "test/test_qoptargparse.cpp",
    "content": "\n\n\n#include <QTest>\n#include <QDebug>\n#include <QTemporaryFile>\n\n#include \"autotest.h\"\n\n#include \"qoptargparse/qoptargparse.h\"\n#include \"qoptargparse/qoptsqlarg.h\"\n#include \"qoptargparse/qoptvarlenarg.h\"\n\n\nclass QOptArgparseTest : public QObject {\n    Q_OBJECT\nprivate slots:\n    void initTestCase(){\n        logger::setup(__FILE__);\n    }\n\n    void testIt() {\n        QOptArgParse parser;\n        QOptArg arg1(\"\", \"one\", \"\");\n        parser.addArg(&arg1);\n\n        QOptSqlArg arg2Sql(\"\", \"two\", \"\", {E_CompareOperator::EQ} );\n        parser.addArg(&arg2Sql);\n\n        QOptSqlArg arg3Sql(\"\", \"three\", \"\", {E_CompareOperator::EQ} );\n        parser.addArg(&arg3Sql);\n\n        QOptSqlArg arg4Sql(\"\", \"four\", \"\", {E_CompareOperator::BETWEEN}, E_CompareOperator::BETWEEN );\n        parser.addArg(&arg4Sql);\n\n        QOptVarLenArg arg5VarLen(\"\", \"five\", \"\");\n        parser.addArg(&arg5VarLen);\n\n        QOptArg arg6(\"\", \"six\", \"\", false);\n        arg6.setFinalizeFlag(true);\n        parser.addArg(&arg6);\n\n        QVector<const char*> argv = {\"--one\", \"1\",\n                              \"--two\", \"-eq\", \"2\",\n                              \"--three\", \"3\",\n                              \"--four\", \"-between\", \"4_1\", \"4_2\",\n                              \"--five\", \"3\", \"5_1\", \"5_2\", \"5_3\",\n                              \"--six\", \"6_1\", \"6_2\", \"6_3\", \"6_4\",\n                              nullptr};\n\n        parser.parse(argv.size() - 1, (char**)argv.data());\n\n        QVERIFY(arg1.wasParsed());\n        QCOMPARE(arg1.getValue<QString>(), QString(\"1\"));\n\n        QVERIFY(arg2Sql.wasParsed());\n        QCOMPARE(arg2Sql.getValue<QString>(), QString(\"2\"));\n        QCOMPARE(arg2Sql.parsedOperator(), E_CompareOperator::EQ);\n\n        QVERIFY(arg3Sql.wasParsed());\n        QCOMPARE(arg3Sql.getValue<QString>(), QString(\"3\"));\n        
QCOMPARE(arg3Sql.parsedOperator(), E_CompareOperator::EQ);\n\n        QVERIFY(arg4Sql.wasParsed());\n        QCOMPARE(arg4Sql.getValues<QStringList>(), QStringList({\"4_1\", \"4_2\"}));\n        QCOMPARE(arg4Sql.parsedOperator(), E_CompareOperator::BETWEEN);\n\n        QVERIFY(arg5VarLen.wasParsed());\n        QCOMPARE(arg5VarLen.getValues<QStringList>(), QStringList({\"5_1\", \"5_2\", \"5_3\"}));\n\n        QVERIFY(arg6.wasParsed());\n        QCOMPARE(parser.rest().len, 4);\n        const char* arg6Actual[] = {\"6_1\", \"6_2\", \"6_3\", \"6_4\"};\n        for(int i=0; i < parser.rest().len; i++ ){\n            QVERIFY(strcmp(parser.rest().argv[i], arg6Actual[i]) == 0);\n        }\n    }\n\n\n};\n\n\nDECLARE_TEST(QOptArgparseTest)\n\n#include \"test_qoptargparse.moc\"\n"
  },
  {
    "path": "test/test_util.cpp",
    "content": "\n\n\n\n#include <QTest>\n#include <QDebug>\n#include <QTemporaryFile>\n\n#include \"autotest.h\"\n\n\nclass UtilTest : public QObject {\n    Q_OBJECT\n\n    template<class T>\n    void splitAbsPathTest(const T & p, const T& expectedPath, const T& expectedFile)\n    {\n        auto pair = splitAbsPath(p);\n        QVERIFY(pair.first == expectedPath);\n        QVERIFY(pair.second == expectedFile);\n    }\n\n\nprivate slots:\n    void initTestCase(){\n        logger::setup(__FILE__);\n    }\n\n    void testSplitAbsPath() {\n        splitAbsPathTest<std::string>(\"/\", \"/\", \"\");\n        splitAbsPathTest<QString>(\"/\", \"/\", \"\");\n\n        splitAbsPathTest<std::string>(\"/home\", \"/\", \"home\");\n        splitAbsPathTest<QString>(\"/home\", \"/\", \"home\");\n\n        splitAbsPathTest<std::string>(\"/home/user\", \"/home\", \"user\");\n        splitAbsPathTest<QString>(\"/home/user\", \"/home\", \"user\");\n\n        splitAbsPathTest<std::string>(\"/home/user/foo\", \"/home/user\", \"foo\");\n        splitAbsPathTest<QString>(\"/home/user/foo\", \"/home/user\", \"foo\");\n\n    }\n\n    void testPathJoinFilename(){\n        QVERIFY(pathJoinFilename(QString(\"/\"), QString(\"foo\")) == \"/foo\");\n        QVERIFY(pathJoinFilename(QString(\"/home/foo\"), QString(\"bar\")) == \"/home/foo/bar\");\n    }\n\n\n};\n\n\nDECLARE_TEST(UtilTest)\n\n#include \"test_util.moc\"\n"
  }
]