[
  {
    "path": ".circleci/config.yml",
    "content": "version: 2.1\n\naliases:\n  docker-image: &image\n    - image: mambaorg/micromamba\n  filter-pr-only: &PR-only\n    branches:\n      ignore:\n        - master\n    tags:\n      ignore:\n        - /.*/\n  filter-master-only: &master-only\n    branches:\n      only:\n        - master\n  filter-tags-only: &official-tag\n    branches:\n      ignore:\n        - /.*/\n    tags:\n      only:\n        - /^pytest-monitor-.*/\n  matrix: &build-matrix\n      parameters:\n        python: [ \"3.8\", \"3.9\", \"3.10\", \"3.11\" ]\n        pytest: [ \"6.1\", \"7\" ]\n      exclude:\n        - pytest: \"6.1\"\n          python: \"3.11\"\n        - pytest: \"6.1\"\n          python: \"3.9\"\n        - pytest: \"6.1\"\n          python: \"3.10\"\n\ncommands:\n  make-env:\n    description: \"Create a brand new environment\"\n    parameters:\n      python:\n        type: string\n        default: \"3\"\n        description: \"Python version to use for building\"\n      pytest:\n        type: string\n        default: \"7\"\n        description: \"Pytest version to use for testing\"\n      use_specific_requirements_file:\n        type: string\n        default: \"requirements.txt\"\n        description: \"Add specific requirements listed in a file to the environment. \"\n      extra_deps:\n        type: string\n        default: \"\"\n        description: \"Extra dependencies to install (given as a space separated string)\"\n      channels:\n        type: string\n        default: \"https://conda.anaconda.org/conda-forge\"\n        description: \"List of channels for fetching packages\"\n      publish_mode:\n        type: boolean\n        default: false\n        description: \"If true, does not pin versions in requirements.txt\"\n    steps:\n      - when:\n          condition:\n            not: << parameters.publish_mode >>\n          steps:\n            - checkout\n            - run:\n                name: \"Apply dependency constraints\"\n                command: |\n                  if [ \"<< parameters.pytest >>\" != \"\" ]; then\n                    sed -i 's/^pytest/pytest=<< parameters.pytest >>/g' << parameters.use_specific_requirements_file >>\n                  fi\n                  echo \"\" >> << parameters.use_specific_requirements_file >>\n                  if [ \"<< parameters.extra_deps >>\" != \"\" ]; then\n                    for dep in << parameters.extra_deps >>\n                    do \n                      echo $dep >> << parameters.use_specific_requirements_file >>\n                    done\n                  fi\n            - run:\n                name: \"Create environment\"\n                command: |\n                  micromamba create -n project\n                  channels=$(echo << parameters.channels >> | sed \"s/ / -c /g\")\n                  requirements=$(cat << parameters.use_specific_requirements_file >> | tr '\\n' ' ')\n                  micromamba install -n project -y python=<< parameters.python >> pip $requirements -c $channels\n            - run:\n                name: \"Install project in environment\"\n                command: |\n                  eval \"$(micromamba shell hook --shell=bash)\"\n                  micromamba activate project\n                  python -m pip install -e .\n            - run:\n                name: \"Dumping env\"\n                command: |\n                  micromamba env export --name project --explicit > manifest.txt\n            - store_artifacts:\n                path: manifest.txt\n      - when:\n          condition: << 
parameters.publish_mode >>\n          steps:\n            - checkout\n            - run:\n                name: \"Create environment\"\n                command: |\n                  micromamba create -n project\n                  channels=$(echo << parameters.channels >> | sed \"s/ / -c /g\")\n                  requirements=$(cat requirements.txt | tr '\\n' ' ')\n                  micromamba install -n project -y python=<< parameters.python >> $requirements -c $channels\n                  micromamba install -n project -y << parameters.extra_deps >> -c $channels\n\n  lint-project:\n    description: \"Check code style\"\n    steps:\n      - run:\n          name: \"Check formatting (black)\"\n          command: |\n            eval \"$(micromamba shell hook --shell=bash)\"\n            micromamba activate project\n            black .\n      - run:\n          name: \"Check code style (flake8)\"\n          command: |\n            eval \"$(micromamba shell hook --shell=bash)\"\n            micromamba activate project\n            flake8 .\n      - run:\n          name: \"Check import order (isort)\"\n          command: |\n            eval \"$(micromamba shell hook --shell=bash)\"\n            micromamba activate project\n            isort .\n  test-project:\n    description: \"Run all the tests and store the results\"\n    parameters:\n      runner:\n        type: string\n        default: \"pytest\"\n        description: \"Test executor\"\n      params:\n        type: string\n        default: \"-v\"\n        description: \"Test executor parameters\"\n    steps:\n      - run:\n          name: \"Launch tests\"\n          command: |\n            eval \"$(micromamba shell hook --shell=bash)\"\n            micromamba activate project\n            mkdir test-results\n            << parameters.runner >> << parameters.params >> --junit-xml=test-results/junit.xml\n      - store_test_results:\n          path: test-results/junit.xml\n      - store_artifacts:\n          path: test-results/junit.xml\n  inject-pypi:\n    description: \"Inject PyPI credentials\"\n    steps:\n      - run:\n          name: \"Set up PyPI\"\n          command: |\n            echo -e \"[pypi]\" >> ~/.pypirc\n            echo -e \"username = __token__\" >> ~/.pypirc\n            echo -e \"password = $PYPI_PASSWORD\" >> ~/.pypirc\n\n  package-project:\n    description: \"Package project\"\n    steps:\n      - run:\n          name: \"Make Packages\"\n          command: |\n            eval \"$(micromamba shell hook --shell=bash)\"\n            micromamba activate project\n            python -m build\n  publish-project:\n    description: \"Send sdist and wheels to PyPI\"\n    steps:\n      - run:\n          name: \"Publish\"\n          command: |\n            eval \"$(micromamba shell hook --shell=bash)\"\n            micromamba activate project\n            twine upload dist/*\n\n# Workflow definition\n
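# The PR workflow runs lint, then the python/pytest build matrix, on pull-request branches only;\n# deploy publishes to PyPI on official pytest-monitor-* release tags; nightly rebuilds master\n# daily against floating python/pytest versions.\n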
workflows:\n  PR:\n    jobs:\n      - lint\n      - build:\n          matrix: *build-matrix\n          name: \"build-py<< matrix.python >>-pytest << matrix.pytest >>\"\n          filters: *PR-only\n          requires:\n            - lint\n\n  deploy:\n    jobs:\n      - publish:\n          filters: *official-tag\n\n  nightly:\n    triggers:\n      - schedule:\n          cron: \"0 0 * * *\"\n          filters: *master-only\n    jobs:\n      - build:\n          python: \"3\"\n          pytest: \"7\"\n\n\njobs:\n  lint:\n    docker: *image\n    steps:\n      - make-env:\n          use_specific_requirements_file: requirements.dev.txt\n      - lint-project\n  build:\n    docker: *image\n    parameters:\n      python:\n        type: string\n      pytest:\n        type: string\n    steps:\n      - make-env:\n          extra_deps: mock\n          python: << parameters.python >>\n          pytest: << parameters.pytest >>\n      - test-project\n  publish:\n    docker: *image\n    steps:\n      - make-env:\n          extra_deps: twine setuptools build\n          channels: https://conda.anaconda.org/conda-forge defaults anaconda\n          publish_mode: true\n      - inject-pypi\n      - package-project\n      - publish-project"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.md",
    "content": "---\nname: Bug report\nabout: Create a report to help us improve\ntitle: ''\nlabels: ''\nassignees: ''\n\n---\n\n**Describe the bug**\nA clear and concise description of what the bug is.\n\n**To Reproduce**\nSteps to reproduce the behavior:\n1. Go to '...'\n2. Click on '....'\n3. Scroll down to '....'\n4. See error\n\n**Expected behavior**\nA clear and concise description of what you expected to happen.\n\n**Screenshots**\nIf applicable, add screenshots to help explain your problem.\n\n**Desktop (please complete the following information):**\n - OS: [e.g. Linux, macOS, Windows]\n - Python version: [e.g. 3.9.7]\n - Pytest version: [e.g. 6.2.3]\n - pytest-monitor version: [e.g. 1.6.2]\n\n**Additional context**\nAdd any other context about the problem here.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.md",
    "content": "---\nname: Feature request\nabout: Suggest an idea for this project\ntitle: ''\nlabels: ''\nassignees: ''\n\n---\n\n**Is your feature request related to a problem? Please describe.**\nA clear and concise description of what the problem is. Ex. I'm always frustrated when [...]\n\n**Describe the solution you'd like**\nA clear and concise description of what you want to happen.\n\n**Describe alternatives you've considered**\nA clear and concise description of any alternative solutions or features you've considered.\n\n**Additional context**\nAdd any other context or screenshots about the feature request here.\n"
  },
  {
    "path": ".github/PULL_REQUEST_TEMPLATE.md",
    "content": "<!--\n:tada: Thanks for submitting a PR to `pytest-monitor` :tada:\n\nThis template is here to guide you with your submission. Please fill it in thoroughly, but delete any session that seems irrelevant. (It's OK to leave unticked boxes for draft PRs or until details are cleared up).\n\nDo not hesitate to use the full extent of [Markdown formatting][markdown_formatting] to make your submission clearer and more explicit. You can see a preview of how the text renders by switching to the *Preview* tab just above this panel.\n\n[markdown_formatting]: https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet\n\nIf all is clear, you can also delete this paragraph!\n-->\n\n# Description\n\n<!--\nPlease include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change. (e.g. is a specific `pytest` version required ?)\n-->\n\nFixes #(issue)\n\n# Type of change\n\n<!--\nPlease delete options that are not relevant.\n-->\n\n- [ ] Bug fix (non-breaking change which fixes an issue)\n- [ ] New feature (non-breaking change which adds functionality)\n- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)\n- [ ] This change requires a documentation update\n\n# Checklist:\n\n<!--\nIf an option is not relevant to your PR, do not delete it but use ~strikethrough formating on it~. This helps keeping track of the entire list.\n-->\n\n- [ ] My code follows the style guidelines of this project\n- [ ] I have performed a self-review of my own code\n- [ ] I have commented my code, particularly in hard-to-understand areas\n- [ ] I have made corresponding changes to the documentation\n- [ ] My changes generate no new warnings\n- [ ] I have added tests that prove my fix is effective or that my feature works\n- [ ] New and existing unit tests pass locally with my changes (not just the [CI](https://link.to.ci))\n- [ ] Any dependent changes have been merged and published in downstream modules\n- [ ] I have provided a link to the issue this PR adresses in the Description section above (If there is none yet,\n[create one](https://github.com/CFMTech/pytest-monitor/issues) !)\n- [ ] I have updated the [changelog](https://github.com/CFMTech/pytest-monitor/blob/master/docs/sources/changelog.rst)\n- [ ] I have labeled my PR using appropriate tags (in particular using status labels like [`Status: Code Review Needed`](https://github.com/jsd-spif/pymonitor/labels/Status%3A%20Code%20Review%20Needed), [`Business: Test Needed`](https://github.com/jsd-spif/pymonitor/labels/Business%3A%20Test%20Needed) or [`Status: In Progress`](https://github.com/jsd-spif/pymonitor/labels/Status%3A%20In%20Progress) if you are still working on the PR)\n\nDo not forget to @ the people that needs to do the review\n\n<!--\nThanks for contributing! :pray:\n-->\n"
  },
  {
    "path": ".gitignore",
    "content": "**/.pymon\n.idea/\n\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nenv/\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\n*.egg-info/\n.installed.cfg\n*.egg\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n.hypothesis/\n.pytest_cache\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\n\n# Flask instance folder\ninstance/\n\n# Sphinx documentation\ndocs/sources/_build/\n\n# MkDocs documentation\n/site/\n\n# PyBuilder\ntarget/\n\n# IPython Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n"
  },
  {
    "path": ".gitlab-ci.yml",
    "content": "image: continuumio/miniconda\n\nstages:\n   - test\n   - deploy\n\nbefore_script:\n   - conda create -q -n pymon -y python=3.6\n   - conda install -q -n pymon psutil memory_profiler pytest -c https://conda.anaconda.org/conda-forge -c defaults -c anaconda -y\n   - source activate pymon\n   - python setup.py develop\n   - mkdir -p build/public\n   - mkdir public\n\npymon_run_test:\n   stage: test\n   script:\n      - pytest \n\npages:\n    stage: deploy\n    except:\n      - branchs\n    script:\n      - conda install --file docs/requirements.txt -c defaults -c conda-forge -c anaconda -c pkgs/main -y\n      - cd docs/sources/ && make html && cd -\n      - mv docs/sources/_build/html/* public/\n    artifacts:\n        paths:\n           - public/\n        expire_in: 1 year\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "repos:\n-   repo: local\n    hooks:\n    - id: black\n      name: black\n      entry: black \n      language: system\n      pass_filenames: true\n      types: [python]\n    - id: flake8\n      name: flake8\n      entry: flake8 --max-line-length=120\n      language: system\n      pass_filenames: true\n      types: [python]\n    - id: isort\n      name: isort\n      entry: isort \n      language: system\n      pass_filenames: true\n      types: [python]\n"
  },
  {
    "path": ".readthedocs.yml",
    "content": "# .readthedocs.yml\n# Read the Docs configuration file\n# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details\n\n# Required\nversion: 2\n\n# Build documentation in the docs/ directory with Sphinx\nsphinx:\n  configuration: docs/sources/conf.py\n\n# Optionally build your docs in additional formats such as PDF and ePub\nformats: all\n\nconda:\n  environment: docs/env.yml\n"
  },
  {
    "path": "AUTHORS",
    "content": "Project developed and lead by Jean-Sébastine Dieu.\n\nContributors include:\n - Raymond Gauthier (jraygauthier) added Python 3.5 support.\n - Kyle Altendorf (altendky) fixed bugs on session teardown\n - Hannes Engelhardt (veritogen) added Bitbucket CI support.\n"
  },
  {
    "path": "CONTRIBUTING.rst",
    "content": "=============================\nContribution, getting started\n=============================\n\nContributions are highly welcomed and appreciated.  Every little help counts,\nso do not hesitate!\n\n.. contents::\n   :depth: 2\n   :backlinks: none\n\nCreate your own development environment\n---------------------------------------\nWe use conda as our main packaging system, though pip work as well. Nevertheless, \nthe following instructions describe how to make your development environment using conda.\n\n#. Create a new environment:\n\n    conda create -n pytest-monitor-dev python=3 -c https://conda.anaconda.org/conda-forge -c defaults\n    \n#. Install the dependencies\n\n    conda install --file requirements.txt -n pytest-monitor-dev -c https://conda.anaconda.org/conda-forge -c defaults\n    \n#. Activate your environment\n\n    conda activate pytest-monitor-dev\n\n#. Install pytest-monitor in development mode\n\n    python setup.py develop\n\n#. You're done!\n\n\n.. _submitfeedback:\n\nFeature request and feebacks\n----------------------------\nWe'd like to hear about your propositions and suggestions. Feel free to\n`submit them as issues <https://github.com/CFMTech/pytest-monitor/issues>`_ and:\n\n* Explain in detail how they should work.\n* Keep the scope as narrow as possible.  This will make it easier to implement.\n\n\n.. _reportbugs:\n\nReport bugs\n-----------\nReport bugs for pytest-monitor in the issue tracker. Every filed bugs should include:\n * Your operating system name and version.\n * Any details about your local setup that might be helpful in troubleshooting, specifically:\n    * the Python interpreter version\n    * installed libraries\n    * and pytest version.\n * Detailed steps to reproduce the bug.\n\n.. _fixbugs:\n\nFix bugs\n--------\n\nLook through the `GitHub issues for bugs <https://github.com/CFMTech/pytest-monitor>`_.\n\n:ref:`Talk <contact>` to developers to find out how you can fix specific bugs.\n\nImplement features\n------------------\n\nLook through the `GitHub issues for enhancements <https://github.com/CFMTech/pytest-monitor/labels/type:%20enhancement>`_.\n\n:ref:`Talk <contact>` to developers to find out how you can implement specific\nfeatures.\n\n.. _`pull requests`:\n.. _pull-requests:\n\nPreparing Pull Requests\n-----------------------\n\nShort version\n~~~~~~~~~~~~~\n\n#. Fork the repository.\n#. Enable and install `pre-commit <https://pre-commit.com>`_ to ensure style-guides and code checks are followed.\n#. Target ``master`` for bugfixes and doc changes.\n#. Target ``features`` for new features or functionality changes.\n#. Follow **PEP-8** for naming and `black <https://github.com/psf/black>`_ for formatting.\n#. Tests are run using ``tox``::\n\n    tox -e linting,py37\n\n   The test environments above are usually enough to cover most cases locally.\n\n#. Write a ``changelog`` entry: ``changelog/2574.bugfix.rst``, use issue id number\n   and one of ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or\n   ``trivial`` for the issue type.\n#. Unless your change is a trivial or a documentation fix (e.g., a typo or reword of a small section) please\n   add yourself to the ``AUTHORS`` file, in alphabetical order.\n\n\nLong version\n~~~~~~~~~~~~\n\nWhat is a \"pull request\"?  It informs the project's core developers about the\nchanges you want to review and merge.  
\n.. _submitfeedback:\n\nFeature requests and feedback\n-----------------------------\nWe'd like to hear your proposals and suggestions. Feel free to\n`submit them as issues <https://github.com/CFMTech/pytest-monitor/issues>`_ and:\n\n* Explain in detail how they should work.\n* Keep the scope as narrow as possible.  This will make it easier to implement.\n\n\n.. _reportbugs:\n\nReport bugs\n-----------\nReport bugs for pytest-monitor in the issue tracker. Every filed bug should include:\n * Your operating system name and version.\n * Any details about your local setup that might be helpful in troubleshooting, specifically:\n    * the Python interpreter version\n    * installed libraries\n    * and pytest version.\n * Detailed steps to reproduce the bug.\n\n.. _fixbugs:\n\nFix bugs\n--------\n\nLook through the `GitHub issues for bugs <https://github.com/CFMTech/pytest-monitor>`_.\n\n:ref:`Talk <contact>` to developers to find out how you can fix specific bugs.\n\nImplement features\n------------------\n\nLook through the `GitHub issues for enhancements <https://github.com/CFMTech/pytest-monitor/labels/type:%20enhancement>`_.\n\n:ref:`Talk <contact>` to developers to find out how you can implement specific\nfeatures.\n\n.. _`pull requests`:\n.. _pull-requests:\n\nPreparing Pull Requests\n-----------------------\n\nShort version\n~~~~~~~~~~~~~\n\n#. Fork the repository.\n#. Enable and install `pre-commit <https://pre-commit.com>`_ to ensure style-guides and code checks are followed.\n#. Target ``master`` for bugfixes and doc changes.\n#. Target ``features`` for new features or functionality changes.\n#. Follow **PEP-8** for naming and `black <https://github.com/psf/black>`_ for formatting.\n#. Tests are run using ``tox``::\n\n    tox -e linting,py37\n\n   The test environments above are usually enough to cover most cases locally.\n\n#. Write a ``changelog`` entry: ``changelog/2574.bugfix.rst``, use the issue id number\n   and one of ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or\n   ``trivial`` for the issue type.\n#. Unless your change is trivial or a documentation fix (e.g., a typo or reword of a small section) please\n   add yourself to the ``AUTHORS`` file, in alphabetical order.\n\n\nLong version\n~~~~~~~~~~~~\n\nWhat is a \"pull request\"?  It informs the project's core developers about the\nchanges you want to review and merge.  Pull requests are stored on\n`GitHub servers <https://github.com/CFMTech/pytest-monitor/pulls>`_.\nOnce you send a pull request, we can discuss its potential modifications and\neven add more commits to it later on. There's an excellent tutorial on how Pull\nRequests work in the\n`GitHub Help Center <https://help.github.com/articles/using-pull-requests/>`_.\n\nHere is a simple overview, with pytest-monitor-specific bits:\n\n#. Fork the\n   `pytest-monitor GitHub repository <https://github.com/CFMTech/pytest-monitor>`__.  It's\n   fine to use ``pytest-monitor`` as your fork repository name because it will live\n   under your user.\n\n#. Clone your fork locally using `git <https://git-scm.com/>`_ and create a branch::\n\n    $ git clone git@github.com:YOUR_GITHUB_USERNAME/pytest-monitor.git\n    $ cd pytest-monitor\n\n    # now, to fix a bug create your own branch off \"master\":\n    $ git checkout -b fix/your-bugfix-branch-name master\n\n    # or to instead add a feature create your own branch off \"master\":\n    $ git checkout -b feature/your-feature-branch-name master\n\n   Given we have \"major.minor.micro\" version numbers, bugfixes will usually\n   be released in micro releases whereas features will be released in\n   minor releases and incompatible changes in major releases.\n\n   If you need some help with Git, follow this quick start\n   guide: https://git.wiki.kernel.org/index.php/QuickStart\n\n#. Install `pre-commit <https://pre-commit.com>`_ and its hook on the pytest-monitor repo::\n\n     $ pip install --user pre-commit\n     $ pre-commit install\n\n   Afterwards ``pre-commit`` will run whenever you commit.\n\n   https://pre-commit.com/ is a framework for managing and maintaining multi-language pre-commit hooks\n   to ensure code style and code formatting are consistent.\n\n#. Install tox\n\n   Tox is used to run all the tests and will automatically set up virtualenvs\n   to run the tests in\n   (it implicitly uses http://www.virtualenv.org/en/latest/)::\n\n    $ pip install tox\n\n#. Run all the tests\n\n   You need to have Python 3.7 available on your system.  Now\n   running tests is as simple as issuing this command::\n\n    $ tox -e linting,py37\n\n   This command will run tests via the \"tox\" tool against Python 3.7\n   and also perform \"lint\" coding-style checks.\n\n#. You can now edit your local working copy and run the tests again as necessary. Please follow PEP-8 for naming.\n\n   You can pass different options to ``tox``. For example, to run tests on Python 3.7 and pass options\n   (e.g. enter pdb on failure) to pytest you can do::\n\n    $ tox -e py37 -- --pdb\n\n   Or to only run tests in a particular test module on Python 3.7::\n\n    $ tox -e py37 -- testing/test_config.py\n\n\n   When committing, ``pre-commit`` will re-format the files if necessary.\n\n#. If instead of using ``tox`` you prefer to run the tests directly, then we suggest creating a virtual environment and using\n   an editable install with the ``testing`` extra::\n\n       $ python3 -m venv .venv\n       $ source .venv/bin/activate  # Linux\n       $ .venv/Scripts/activate.bat  # Windows\n       $ pip install -e \".[testing]\"\n\n   Afterwards, you can edit the files and run pytest normally::\n\n       $ pytest testing/test_config.py\n\n\n#. Commit and push once your tests pass and you are happy with your change(s)::\n\n    $ git commit -a -m \"<commit message>\"\n    $ git push -u\n\n#. Create a new changelog entry in ``changelog``.\n
   The file should be named ``<issueid>.<type>.rst``,\n   where *issueid* is the number of the issue related to the change and *type* is one of\n   ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or ``trivial``. You may skip the\n   changelog entry if the change doesn't affect the documented behaviour of pytest-monitor.\n\n#. Add yourself to the ``AUTHORS`` file if not there yet, in alphabetical order.\n\n#. Finally, submit a pull request through the GitHub website using this data::\n\n    head-fork: YOUR_GITHUB_USERNAME/pytest-monitor\n    compare: your-branch-name\n\n    base-fork: CFMTech/pytest-monitor\n    base: master          # if it's a bugfix\n    base: features        # if it's a feature\n\n"
  },
  {
    "path": "LICENSE",
    "content": "\nThe MIT License (MIT)\n\nCopyright (c) 2020 Capital Fund Management\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"
  },
  {
    "path": "MANIFEST.in",
    "content": "include LICENSE\ninclude README.rst\n\nrecursive-exclude * __pycache__\nrecursive-exclude * *.py[co]\n"
  },
  {
    "path": "README.rst",
    "content": ".. image:: docs/sources/_static/pytestmonitor_readme.png\n   :width: 160\n   :align: center\n   :alt: Pytest-Monitor\n\n------\n\n==============\npytest-monitor\n==============\n\n.. image:: https://readthedocs.org/projects/pytest-monitor/badge/?version=latest\n    :target: https://pytest-monitor.readthedocs.io/en/latest/?badge=latest\n    :alt: Documentation Status\n\n.. image:: https://img.shields.io/pypi/v/pytest-monitor.svg\n    :target: https://pypi.org/project/pytest-monitor\n    :alt: PyPI version\n\n.. image:: https://img.shields.io/pypi/pyversions/pytest-monitor.svg\n    :target: https://circleci.com/gh/jsd-spif/pymonitor.svg?style=svg&circle-token=cdf89a7212139aff0cc236227cb519363981de0b\n    :alt: Python versions\n\n.. image:: https://circleci.com/gh/CFMTech/pytest-monitor/tree/master.svg?style=shield&circle-token=054adaaf6a19f4f55a4f0ad419649f1807e70ea9\n    :target: https://circleci.com/gh/CFMTech/pytest-monitor/tree/master\n    :alt: See Build Status on Circle CI\n\n.. image:: https://anaconda.org/conda-forge/pytest-monitor/badges/platforms.svg\n    :target: https://anaconda.org/conda-forge/pytest-monitor\n\n.. image:: https://anaconda.org/conda-forge/pytest-monitor/badges/version.svg\n    :target: https://anaconda.org/conda-forge/pytest-monitor\n\n.. image:: https://img.shields.io/badge/License-MIT-blue.svg\n    :target: https://opensource.org/licenses/MIT\n    \n\nPytest-monitor is a pytest plugin designed for analyzing resource usage.\n\n----\n\n\nFeatures\n--------\n\n- Analyze your resources consumption through test functions:\n\n  * memory consumption\n  * time duration\n  * CPU usage\n- Keep a history of your resource consumption measurements.\n- Compare how your code behaves between different environments.\n\n\nUsage\n-----\n\nSimply run *pytest* as usual: *pytest-monitor* is active by default as soon as it is installed.\nAfter running your first session, a .pymon `sqlite` database will be accessible in the directory where pytest was run.\n\nExample of information collected for the execution context:\n\n+-----------------------------------+-----------+-------------------+---------+-------------------------------------------+---------------+--------------------+------------+-------------------------------+-------------------------------+--------------------------------------------------+\n|                              ENV_H|  CPU_COUNT|  CPU_FREQUENCY_MHZ| CPU_TYPE|                                 CPU_VENDOR|  RAM_TOTAL_MB |       MACHINE_NODE |MACHINE_TYPE| MACHINE_ARCH                  |  SYSTEM_INFO                  |                                       PYTHON_INFO|\n+===================================+===========+===================+=========+===========================================+===============+====================+============+===============================+===============================+==================================================+                   \n|  8294b1326007d9f4c8a1680f9590c23d |        36 |              3000 |  x86_64 | Intel(R) Xeon(R) Gold 6154 CPU @ 3.00GHz  |      772249   | some.host.vm.fr    |     x86_64 |       64bit                   | Linux - 3.10.0-693.el7.x86_64 | 3.6.8 (default, Jun 28 2019, 11:09:04) \\n[GCC ...|\n+-----------------------------------+-----------+-------------------+---------+-------------------------------------------+---------------+--------------------+------------+-------------------------------+-------------------------------+--------------------------------------------------+\n\nHere 
is an example of collected data stored in the result database:\n\n+------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+\n|                      RUN_DATE|                             ENV_H|                                    SCM_ID|             ITEM_START_TIME|                                    ITEM|      KIND| COMPONENT|  TOTAL_TIME|  USER_TIME|  KERNEL_TIME|   CPU_USAGE|  MEM_USAGE|\n+==============================+==================================+==========================================+============================+========================================+==========+==========+============+===========+=============+============+===========+\n|   2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:36.890477 |             pkg1.test_mod1/test_sleep1 | function |     None |   1.005669 |      0.54 |       0.06  |  0.596618  | 1.781250  |\n+------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+\n|   2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:39.912029 |       pkg1.test_mod1/test_heavy[10-10] | function |     None |   0.029627 |      0.55 |        0.08 |  21.264498 |  1.781250 |\n+------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+\n|   2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:39.948922 |     pkg1.test_mod1/test_heavy[100-100] | function |     None |   0.028262 |      0.56 |        0.09 |  22.998773 |  1.781250 |\n+------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+\n|   2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:39.983869 |   pkg1.test_mod1/test_heavy[1000-1000] | function |     None |   0.030131 |      0.56 |        0.10 |  21.904277 |  2.132812 |\n+------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+\n|   2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:40.020823 | pkg1.test_mod1/test_heavy[10000-10000] | function |     None |   0.060060 |      0.57 |        0.14 |  11.821601 | 41.292969 |\n+------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+\n|   
2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:40.093490 |        pkg1.test_mod2/test_sleep_400ms | function |     None |   0.404860 |      0.58 |        0.15 |   1.803093 |  2.320312 |\n+------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+\n|   2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:40.510525 |      pkg2.test_mod_a/test_master_sleep | function |     None |   5.006039 |      5.57 |        0.15 |   1.142620 |  2.320312 |\n+------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+\n|   2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:45.530780 |          pkg3.test_mod_cl/test_method1 | function |     None |   0.030505 |      5.58 |        0.16 | 188.164762 |  2.320312 |\n+------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+\n|   2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:50.582954 |     pkg4.test_mod_a/test_force_monitor | function |     test |   1.005015 |     11.57 |       0.17  | 11.681416  | 2.320312  |\n+------------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+----------+------------+-----------+-------------+------------+-----------+\n\nDocumentation\n-------------\n\nFull documentation is `available <https://pytest-monitor.readthedocs.io/en/latest/?badge=latest>`_.\n\nInstallation\n------------\n\nYou can install *pytest-monitor* via *conda* (through the `conda-forge` channel)::\n\n    $ conda install pytest-monitor -c https://conda.anaconda.org/conda-forge\n\nAnother possibility is to install *pytest-monitor* via `pip`_ from `PyPI`_::\n\n    $ pip install pytest-monitor\n\n\nRequirements\n------------\n\nYou will need a valid Python 3.5+ interpreter. To get measures, we rely on:\n\n- *psutil* to extract CPU usage\n- *memory_profiler* to collect memory usage\n- and *pytest* (obviously!)\n\n**Note: this plugin doesn't work with unittest**\n\nStorage backends\n----------------\nBy default, pytest-monitor stores its results in a local SQLite3 database, making them easy to access.\nIf you need a more powerful way to analyze your results, check out the\n`monitor-server-api`_, which provides both a REST API for storing and historizing your results and an API for querying your data.\nAn alternative service (using MongoDB) can be used thanks to a contribution from @dremdem: `pytest-monitor-backend`_.\n\n
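For a quick local look at the collected measures, the ``.pymon`` database can be queried with Python's\nbuilt-in ``sqlite3`` module. The snippet below is a minimal sketch, assuming the measures land in a\n``TEST_METRICS`` table with the column names shown in the example above::\n\n    # quick_look.py - print the five slowest recorded tests from .pymon\n    import sqlite3\n\n    con = sqlite3.connect(\".pymon\")\n    rows = con.execute(\n        \"SELECT ITEM, TOTAL_TIME FROM TEST_METRICS \"\n        \"ORDER BY TOTAL_TIME DESC LIMIT 5\"\n    )\n    for item, total_time in rows:\n        print(\"{}: {:.3f}s\".format(item, total_time))\n    con.close()\n\n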
\nContributing\n------------\n\nContributions are very welcome. Tests can be run with `tox`_. Before submitting a pull request, please ensure\nthat:\n\n* both internal tests and examples are passing.\n* internal tests have been written if necessary.\n* if your contribution provides a new feature, make sure to provide an example and update the documentation accordingly.\n\nLicense\n-------\n\nThis code is distributed under the `MIT`_ license.  *pytest-monitor* is free, open-source software.\n\n\nIssues\n------\n\nIf you encounter any problem, please `file an issue`_ along with a detailed description.\n\nAuthor\n------\n\nThe main author of `pytest-monitor` is Jean-Sébastien Dieu, who can be reached at jdieu@salsify.fr.\n\n----\n\nThis `pytest`_ plugin was generated with `Cookiecutter`_ along with `@hackebrot`_'s `cookiecutter-pytest-plugin`_ template.\n\n.. _`Cookiecutter`: https://github.com/audreyr/cookiecutter\n.. _`@hackebrot`: https://github.com/hackebrot\n.. _`MIT`: http://opensource.org/licenses/MIT\n.. _`cookiecutter-pytest-plugin`: https://github.com/pytest-dev/cookiecutter-pytest-plugin\n.. _`file an issue`: https://github.com/CFMTech/pytest-monitor/issues\n.. _`pytest`: https://github.com/pytest-dev/pytest\n.. _`tox`: https://tox.readthedocs.io/en/latest/\n.. _`pip`: https://pypi.org/project/pip/\n.. _`PyPI`: https://pypi.org/project\n.. _`monitor-server-api`: https://github.com/CFMTech/monitor-server-api\n.. _`pytest-monitor-backend`: https://github.com/dremdem/pytest-monitor-backend\n"
  },
  {
    "path": "docs/env.yml",
    "content": "name: docenv\n\nchannels:\n  - anaconda\n\ndependencies:\n  - python==3.7\n  - pip:\n     - alabaster==0.7.12\n     - asn1crypto==1.3.0\n     - Babel==2.8.0\n     - certifi==2019.11.28\n     - cffi==1.13.2\n     - chardet==3.0.4\n     - cryptography==2.8\n     - docutils==0.16\n     - idna==2.8\n     - imagesize==1.2.0\n     - Jinja2==2.11.1\n     - lz4==3.0.2\n     - MarkupSafe==1.1.1\n     - packaging==20.1\n     - pycparser==2.19\n     - Pygments==2.5.2\n     - pyOpenSSL==19.1.0\n     - pyparsing==2.4.6\n     - PySocks==1.7.1\n     - pytz==2019.3\n     - releases==1.6.3\n     - requests==2.22.0\n     - semantic-version==2.6.0\n     - six==1.14.0\n     - snowballstemmer==2.0.0\n     - Sphinx==2.3.1\n     - sphinx-rtd-theme==0.4.3\n     - sphinxcontrib-applehelp==1.0.1\n     - sphinxcontrib-devhelp==1.0.1\n     - sphinxcontrib-htmlhelp==1.0.2\n     - sphinxcontrib-jsmath==1.0.1\n     - sphinxcontrib-qthelp==1.0.2\n     - sphinxcontrib-serializinghtml==1.1.3\n     - urllib3==1.25.8\n"
  },
  {
    "path": "docs/requirements.txt",
    "content": "alabaster\nbabel\nsphinx\nsphinx-releases\nsphinx_rtd_theme\nsemantic_version==2.6.*\nmake\npygraphviz\n"
  },
  {
    "path": "docs/sources/Makefile",
    "content": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD   = sphinx-build\nPAPER         =\nBUILDDIR      = _build\n\n# User-friendly check for sphinx-build\nifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)\n$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)\nendif\n\n# Internal variables.\nPAPEROPT_a4     = -D latex_paper_size=a4\nPAPEROPT_letter = -D latex_paper_size=letter\nALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .\n# the i18n builder cannot share the environment and doctrees with the others\nI18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .\n\n.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext\n\nhelp:\n\t@echo \"Please use \\`make <target>' where <target> is one of\"\n\t@echo \"  html       to make standalone HTML files\"\n\t@echo \"  dirhtml    to make HTML files named index.html in directories\"\n\t@echo \"  singlehtml to make a single large HTML file\"\n\t@echo \"  pickle     to make pickle files\"\n\t@echo \"  json       to make JSON files\"\n\t@echo \"  htmlhelp   to make HTML files and a HTML help project\"\n\t@echo \"  qthelp     to make HTML files and a qthelp project\"\n\t@echo \"  applehelp  to make an Apple Help Book\"\n\t@echo \"  devhelp    to make HTML files and a Devhelp project\"\n\t@echo \"  epub       to make an epub\"\n\t@echo \"  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter\"\n\t@echo \"  latexpdf   to make LaTeX files and run them through pdflatex\"\n\t@echo \"  latexpdfja to make LaTeX files and run them through platex/dvipdfmx\"\n\t@echo \"  text       to make text files\"\n\t@echo \"  man        to make manual pages\"\n\t@echo \"  texinfo    to make Texinfo files\"\n\t@echo \"  info       to make Texinfo files and run them through makeinfo\"\n\t@echo \"  gettext    to make PO message catalogs\"\n\t@echo \"  changes    to make an overview of all changed/added/deprecated items\"\n\t@echo \"  xml        to make Docutils-native XML files\"\n\t@echo \"  pseudoxml  to make pseudoxml-XML files for display purposes\"\n\t@echo \"  linkcheck  to check all external links for integrity\"\n\t@echo \"  doctest    to run all doctests embedded in the documentation (if enabled)\"\n\t@echo \"  coverage   to run coverage check of the documentation (if enabled)\"\n\nclean:\n\trm -rf $(BUILDDIR)/*\n\nhtml:\n\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/html.\"\n\ndirhtml:\n\t$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\nsinglehtml:\n\t$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml\n\t@echo\n\t@echo \"Build finished. 
The HTML page is in $(BUILDDIR)/singlehtml.\"\n\npickle:\n\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle\n\t@echo\n\t@echo \"Build finished; now you can process the pickle files.\"\n\njson:\n\t$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json\n\t@echo\n\t@echo \"Build finished; now you can process the JSON files.\"\n\nhtmlhelp:\n\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp\n\t@echo\n\t@echo \"Build finished; now you can run HTML Help Workshop with the\" \\\n\t      \".hhp project file in $(BUILDDIR)/htmlhelp.\"\n\nqthelp:\n\t$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp\n\t@echo\n\t@echo \"Build finished; now you can run \"qcollectiongenerator\" with the\" \\\n\t      \".qhcp project file in $(BUILDDIR)/qthelp, like this:\"\n\t@echo \"# qcollectiongenerator $(BUILDDIR)/qthelp/pytest-cookiecutterplugin_name.qhcp\"\n\t@echo \"To view the help file:\"\n\t@echo \"# assistant -collectionFile $(BUILDDIR)/qthelp/pytest-cookiecutterplugin_name.qhc\"\n\napplehelp:\n\t$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp\n\t@echo\n\t@echo \"Build finished. The help book is in $(BUILDDIR)/applehelp.\"\n\t@echo \"N.B. You won't be able to view it unless you put it in\" \\\n\t      \"~/Library/Documentation/Help or install it in your application\" \\\n\t      \"bundle.\"\n\ndevhelp:\n\t$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp\n\t@echo\n\t@echo \"Build finished.\"\n\t@echo \"To view the help file:\"\n\t@echo \"# mkdir -p $$HOME/.local/share/devhelp/pytest-cookiecutterplugin_name\"\n\t@echo \"# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pytest-cookiecutterplugin_name\"\n\t@echo \"# devhelp\"\n\nepub:\n\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub\n\t@echo\n\t@echo \"Build finished. The epub file is in $(BUILDDIR)/epub.\"\n\nlatex:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo\n\t@echo \"Build finished; the LaTeX files are in $(BUILDDIR)/latex.\"\n\t@echo \"Run \\`make' in that directory to run these through (pdf)latex\" \\\n\t      \"(use \\`make latexpdf' here to do that automatically).\"\n\nlatexpdf:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through pdflatex...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\nlatexpdfja:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through platex and dvipdfmx...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\ntext:\n\t$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text\n\t@echo\n\t@echo \"Build finished. The text files are in $(BUILDDIR)/text.\"\n\nman:\n\t$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man\n\t@echo\n\t@echo \"Build finished. The manual pages are in $(BUILDDIR)/man.\"\n\ntexinfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo\n\t@echo \"Build finished. 
The Texinfo files are in $(BUILDDIR)/texinfo.\"\n\t@echo \"Run \\`make' in that directory to run these through makeinfo\" \\\n\t      \"(use \\`make info' here to do that automatically).\"\n\ninfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo \"Running Texinfo files through makeinfo...\"\n\tmake -C $(BUILDDIR)/texinfo info\n\t@echo \"makeinfo finished; the Info files are in $(BUILDDIR)/texinfo.\"\n\ngettext:\n\t$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale\n\t@echo\n\t@echo \"Build finished. The message catalogs are in $(BUILDDIR)/locale.\"\n\nchanges:\n\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes\n\t@echo\n\t@echo \"The overview file is in $(BUILDDIR)/changes.\"\n\nlinkcheck:\n\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck\n\t@echo\n\t@echo \"Link check complete; look for any errors in the above output \" \\\n\t      \"or in $(BUILDDIR)/linkcheck/output.txt.\"\n\ndoctest:\n\t$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest\n\t@echo \"Testing of doctests in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/doctest/output.txt.\"\n\ncoverage:\n\t$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage\n\t@echo \"Testing of coverage in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/coverage/python.txt.\"\n\nxml:\n\t$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml\n\t@echo\n\t@echo \"Build finished. The XML files are in $(BUILDDIR)/xml.\"\n\npseudoxml:\n\t$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml\n\t@echo\n\t@echo \"Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml.\"\n"
  },
  {
    "path": "docs/sources/changelog.rst",
    "content": "=========\nChangelog\n=========\n\n* :release:`to be discussed`\n* :feature:`#75` Automatically gather CI build information for Bitbucket CI.\n\n* :release:`1.6.6 <2023-05-06>`\n* :bug:`#64` Prepare version 1.7.0 of pytest-monitor. Last version to support Python <= 3.7 and all pytest <= 5.*\n* :bug:`#0` Improve and fix some CI issues, notably one that may cause python to not be the requested one but a more recent one.\n\n* :release:`1.6.5 <2022-10-16>`\n* :bug:`#60` Make sure that when psutil cannot fetch cpu frequency, the fallback mechanism is used.\n\n* :release:`1.6.4 <2022-05-18>`\n* :bug:`#56` Force the CPU frequency to 0 and emit a warning when unable to fetch it from the system.\n* :bug:`#54` Fix a bug that crashes the monitor upon non ASCII characters in commit log under Perforce. Improved P4 change number extraction.\n\n* :release:`1.6.3 <2021-12-22>`\n* :bug:`#50` Fix a bug where a skipping fixture resulted in an exception during teardown.\n\n* :release:`1.6.2 <2021-08-24>`\n* :bug:`#40` Fix a bug that cause the garbage collector to be disable by default.\n\n* :release:`1.6.1 <2021-08-23>`\n* :bug:`#43` Fixes a bug that prevent sending session tags correctly.\n* :bug:`#40` Force garbage collector to run between tests (better result accuracy)\n\n* :release:`1.6.0 <2021-04-16>`\n* :feature:`#0` Support for python 3.5\n* :feature:`#35` Better support for Doctest item.\n* :feature:`#24` Prefer JSON data type for storing session extended information instead of plain text.\n\n\n* :release:`1.5.1 <2021-02-05>`\n* :bug:`#31` Rename option --remote into --remote-server as it seems to conflict with some plugins.  \n* :bug:`#23` Fix requirements minimum version.\n\n* :release:`1.5.0 <2020-11-20>`\n* :feature:`25` Automatically gather CI build information (supported CI are Drone CI, Gitlab CI, Jenkins CI, Travis CI, Circle CI)\n* :bug:`#23 major` psutil min requirement is now 5.1.0\n* :bug:`#28 major` Fix a bug that cause output to be printed multiple times\n\n* :release:`1.4.0 <2020-06-04>`\n* :feature:`21` Using json format to populate the RUN_DESCRIPTION field (through --description and --tag fields)\n\n* :release:`1.3.0 <2020-05-12>`\n* :feature:`19` Normalized http codes used for sending metrics to a remote server.\n\n* :release:`1.2.0 <2020-04-17>`\n* :feature:`13` Change default analysis scope to function.\n* :bug:`12 major` No execution contexts pushed when using a remote server.\n* :bug:`14 major` A local database is always created even with --no-db option passed.\n\n* :release:`1.1.1 <2020-03-31>`\n* :bug:`9` Fix remote server interface for sending measures.\n\n* :release:`1.1.0 <2020-03-30>`\n* :feature:`5` Extend item information and separate item from its variants.\n* :feature:`3` Compute user time and kernel time on a per test basis for clarity and ease of exploitation.\n* :feature:`4` Added an option to add a description to a pytest run\n\n* :release:`1.0.1 <2020-03-18>`\n* :bug:`2` pytest-monitor hangs infinitely when a pytest outcome (skip, fail...) is issued.\n\n* :release:`1.0.0 <2020-02-20>`\n* :feature:`0` Initial release\n"
  },
  {
    "path": "docs/sources/conf.py",
    "content": "# -*- coding: utf-8 -*-\n#\n# pytest-monitor documentation build configuration file, created by\n# sphinx-quickstart on Thu Oct  1 00:43:18 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport pathlib\n\n\ndef read_version():\n    init = pathlib.Path(__file__).parent.parent.parent / \"pytest_monitor\" / \"__init__.py\"\n    with init.open(\"r\") as pkg_init_f:\n        version_read = [line.strip() for line in pkg_init_f if line.startswith(\"__version__\")]\n    if len(version_read) > 1:\n        raise ValueError('Multiple version found in \"pytest_monitor\" package!')\n    if not version_read:\n        raise ValueError('No version found in \"pytest_monitor\" package!')\n    return version_read[0].split(\"=\", 1)[1].strip(\"\\\" '\")\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n    \"sphinx.ext.ifconfig\",\n    \"sphinx.ext.todo\",\n    \"sphinx.ext.graphviz\",\n    \"releases\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"pytest-monitor\"\ncopyright = \"2019, Jean-Sébastien Dieu\"  # noqa A001\nauthor = \"Jean-Sébastien Dieu\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = read_version()\n# The full version, including alpha/beta/rc tags.\nrelease = f\"pytest-monitor v{version}\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\"]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\ntodo_emit_warnings = True\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents.  If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar.  Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"_static/pytestmonitor_alpha.png\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. 
Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it.  The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"pytestmonitor-doc\"\n"
  },
  {
    "path": "docs/sources/configuration.rst",
    "content": "========================\nConfiguring your session\n========================\n\n`pytest-monitor` gives you flexibility for running your test suite.\nIn this section, we will discuss the different available options, and how they influence the `pytest` session.\n\nScope Restriction\n-----------------\n\n`pytest-monitor` is able to restrict the scope of the analysis. As a default, \nonly tests functions discovered by pytest are monitored.\n\nSometime, you might want to monitor a whole module or test session. This can be\nachieved thanks to the *\\-\\-restrict-scope-to* option. \n\nIf a scope restriction is set, then the monitoring will be performed at the selected levels.\nFor example, monitoring at both function and module level can be achieved by the following command:\n\n.. code-block:: shell\n\n    pytest --restrict-scope-to function,module\n\nAccepted values are:\n \n * function: test functions will be monitored individually, leading to one entry per test function.\n * module: each discovered module will be monitored regardless of the others.\n * class: test class objects will be monitored individually.\n * session: monitor the whole session.\n\nIt is important to realize that using multiple scopes has an impact on the monitoring measures. For example, the `pytest-monitor` code that monitors functions does consume resources for each function (notably compute time). As a consequence, the resources consumed by their module will include the resources consumed by `pytest-monitor` for each function. If individual functions were not monitored, the resource consumption reported for the module would therefore be lower.\n\nDue to the way `pytest` handles test modules, some specificities apply when monitoring modules:\n\n * The total measured elapsed time includes the setup/teardown process for each function.\n   On the other hand, a function object measures only the duration of the function run (without the setup and teardown parts).\n * Consumed memory will be the peak of memory usage during the whole module run.\n\n\nHandling parameterized tests\n----------------------------\n\nParameterized tests can be introspected by `pytest-monitor` during the setup phase: their real\nname is based on the parameter values. This uses the string representation of the parameters (so you  want to make sure that this representation suits your needs).\n\nLet's consider the following test:\n\n.. code-block:: python\n\n    @pytest.mark.parametrize(('asint', 'asstr'), [(10, \"10\"), (100, \"100\"), (1000, \"1000\"), (10000, \"10000\")])\n    def test_p(asint, asstr):\n        assert asint == int(asstr)\n\nBy default, `pytest-monitor` will generate the following entries:\n\n * test_p[10-10]\n * test_p[100-100]\n * test_p[1000-1000]\n * test_p[10000-10000]\n\n\nYou can ask `pytest-monitor` to tag parameters with their names (as provided by ``@pytest.mark.parametrize``), with the following option:\n\n.. code-block:: shell\n\n    pytest --parametrization-explicit\n\nwhich will lead to the following entries:\n\n * test_p[asint_10-asstr_10]\n * test_p[asint_100-asstr_100]\n * test_p[asint_1000-asstr_1000]\n * test_p[asint_10000-asstr_10000]\n\n\nDisable monitoring\n------------------\n\nIf you need for some reason to disable the monitoring, pass the *\\-\\-no-monitor* option.\n\n\nDescribing a run\n----------------\n\nSometimes, you might want to compare identical state of your code. In such cases, relying only on the scm\nreferences and the run date of the session is not sufficient. 
\nyour session using a description and tags.\n\n\nDescription and tags\n~~~~~~~~~~~~~~~~~~~~\nThe description should be used to provide a brief summary of your run, while tags can be used to\nrecord specific information you want to focus on during your analysis.\nSetting a description is as simple as this:\n\n.. code-block:: shell\n\n    bash $> pytest --description \"Any run description you want\"\n\n\nFlagging your session with specific information is no more complex than setting the description:\n\n.. code-block:: shell\n\n    bash $> pytest --tag pandas=1.0.1 --tag numpy=1.17\n\nThis will result in a session with the following description:\n\n.. code-block:: text\n\n    {\n        \"pandas\": \"1.0.1\",\n        \"numpy\": \"1.17\"\n    }\n\n\nYou can of course use both options to fully describe your session:\n\n.. code-block:: shell\n\n    bash $> pytest --tag pandas=1.0.1 --tag numpy=1.17 --description \"Your summary\"\n\nThis will result in a session with the following description:\n\n.. code-block:: text\n\n    {\n        \"description\": \"Your summary\",\n        \"pandas\": \"1.0.1\",\n        \"numpy\": \"1.17\"\n    }\n\nDescribing a CI build\n~~~~~~~~~~~~~~~~~~~~~\nFor convenience, pytest-monitor automatically extends the session's description with some information\nextracted from the CI build. For that purpose, pytest-monitor reads the environment\nat the start of the test session in search of:\n\n * **pipeline_branch**, which can either represent a CI pipeline name (preferentially) or the source code branch name.\n * **pipeline_build_no**, which is the pipeline build number (if available) or the pipeline ID if any.\n * **__ci__**, which indicates the CI system used.\n\nCurrently, pytest-monitor supports the following CI systems:\n\n * Gitlab CI\n * Travis CI\n * Jenkins\n * Drone CI\n * Circle CI\n * Bitbucket CI\n\nThe following table explains how these fields are mapped:\n\n+--------------+-----------------------------------+-----------------------+---------------+\n|       CI     |     pipeline_branch               | pipeline_build_no     |  __ci__       |\n+==============+===================================+=======================+===============+\n|  Jenkins CI  |  BRANCH_NAME if set else JOB_NAME | BUILD_NUMBER          |   jenkinsci   |\n+--------------+-----------------------------------+-----------------------+---------------+\n|  Drone CI    |  DRONE_REPO_BRANCH                | DRONE_BUILD_NUMBER    |   droneci     |\n+--------------+-----------------------------------+-----------------------+---------------+\n|  Circle CI   |  CIRCLE_JOB                       | CIRCLE_BUILD_NUM      |   circleci    |\n+--------------+-----------------------------------+-----------------------+---------------+\n|  Gitlab CI   |  CI_JOB_NAME                      | CI_PIPELINE_ID        |   gitlabci    |\n+--------------+-----------------------------------+-----------------------+---------------+\n|  Travis CI   |  TRAVIS_BUILD_ID                  | TRAVIS_BUILD_NUMBER   |   travisci    |\n+--------------+-----------------------------------+-----------------------+---------------+\n|  Bitbucket CI|  BITBUCKET_BRANCH                 | BITBUCKET_BUILD_NUMBER|   bitbucketci |\n+--------------+-----------------------------------+-----------------------+---------------+\n\nNote that none of these fields will be added if:\n\n * the CI context is incomplete, or\n * the CI context cannot be computed.\n\nParameters affecting measures\n-----------------------------\n
By default, pytest-monitor runs the garbage collector prior to executing each test function.\nThis leads to finer memory measurements. If you want to disable this call to the\ngarbage collector, just set the `--no-gc` option on the command line.\n\n.. code-block:: shell\n\n    bash $> pytest --no-gc\n\nForcing CPU frequency\n---------------------\nUnder some circumstances, you may want to set the CPU frequency instead of asking `pytest-monitor` to compute it.\nTo do so, you can either:\n\n - ask `pytest-monitor` to use a preset value if it does not manage to compute the CPU frequency,\n - or not try computing the CPU frequency at all and use your preset value.\n\nTwo environment variables control this behaviour:\n\n - `PYTEST_MONITOR_CPU_FREQ` allows you to preset a value for the CPU frequency. It must be a float-convertible value.\n   This value will be used if `pytest-monitor` cannot compute the CPU frequency. Otherwise, `0.0` will be used as a\n   default value.\n - `PYTEST_MONITOR_FORCE_CPU_FREQ` instructs `pytest-monitor` whether to try computing the CPU frequency. It expects an\n   integer-convertible value. If not set, or if the integer representation of the value is `0`, then `pytest-monitor` will\n   try to compute the CPU frequency, falling back to the behaviour described for the previous environment variable.\n   If it is set and not equal to `0`, then the value held by `PYTEST_MONITOR_CPU_FREQ` is used\n   (`0.0` if not set).\n
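\nFor example, the following invocation skips frequency computation and forces a preset value (`2500.0` is just an illustrative figure):\n\n.. code-block:: shell\n\n    bash $> PYTEST_MONITOR_FORCE_CPU_FREQ=1 PYTEST_MONITOR_CPU_FREQ=2500.0 pytest\n"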
  },
  {
    "path": "docs/sources/contributing.rst",
    "content": "==================\nContribution guide\n==================\n\nIf you want to contribute to this project, you are welcome to do so!\n\nCreate your own development environment\n---------------------------------------\nWe use conda as our main packaging system, though pip works as well.\n\nThe following instructions describe how to create your development environment using conda:\n\n#. Create a new environment:\n\n    .. code-block:: bash\n\n       conda create -n pytest-monitor-dev python=3 -c https://conda.anaconda.org/conda-forge -c defaults\n        \n#. Install the dependencies:\n\n    .. code-block:: bash\n\n       conda install --file requirements.dev.txt -n pytest-monitor-dev -c https://conda.anaconda.org/conda-forge -c defaults\n        \n#. Make sure to have pip install or install it if missing:\n\n    .. code-block:: bash\n\n        # Check for pip\n        conda list | grep pip\n        # Install if needed\n        conda install -n pytest-monitor-dev pip -c https://conda.anaconda.org/conda-forge\n\n#. Activate your environment:\n\n    .. code-block:: bash\n\n        conda activate pytest-monitor-dev\n\n#. Install `pytest-monitor` in development mode:\n\n    .. code-block:: bash\n\n       python -m pip install -e \".[dev]\"\n\n#. Install the pre-commit hooks\n    .. code-block:: bash\n\n       pre-commit install\n\n#. You're done!\n\nFeature requests and feedback\n-----------------------------\n\nWe would be happy to hear about your propositions and suggestions. Feel free to\n`submit them as issues <https://github.com/CFMTech/pytest-monitor/issues>`_ and:\n\n* Explain in details the expected behavior.\n* Keep the scope as narrow as possible.  This will make them easier to implement.\n\n\n.. _reportbugs:\n\nBug reporting\n-------------\n\nReport bugs for `pytest-monitor` in the `issue tracker <https://github.com/CFMTech/pytest-monitor/issues>`_. Every filed bugs should include:\n\n * Your operating system name and version.\n * Any details about your local setup that might be helpful in troubleshooting, specifically:\n     * the Python interpreter version,\n     * installed libraries,\n     * and your `pytest` version.\n * Detailed steps to reproduce the bug.\n\n.. _fixbugs:\n\nBug fixing\n----------\n\nLook through the `GitHub issues for bugs <https://github.com/CFMTech/pytest-monitor/issues>`_.\nTalk to developers to find out how you can fix specific bugs.\n\nFeature implementation\n----------------------\n\nLook through the `GitHub issues for enhancements <https://github.com/CFMTech/pytest-monitor/labels/type:%20enhancement>`_.\n\nTalk to developers to find out how you can implement specific features.\n\nThank you!\n"
  },
  {
    "path": "docs/sources/index.rst",
    "content": ".. pytest-monitor documentation master file, created by\n   sphinx-quickstart on Thu Oct  1 00:43:18 2015.\n   You can adapt this file completely to your liking, but it should at least\n   contain the root `toctree` directive.\n\nWelcome to pytest-monitor's documentation!\n===============================================================\n\nContents:\n\n.. toctree::\n   :maxdepth: 2\n\n   introduction\n   installation\n   configuration\n   run\n   operating\n   remote\n   contributing \n   changelog\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n\n"
  },
  {
    "path": "docs/sources/installation.rst",
    "content": "============\nInstallation\n============\n\n`pytest-monitor` is a plugin for `pytest`.\n\nSupported environments\n----------------------\n\n`pytest-monitor` currently works on *Linux* and *macOS*. Support for *Windows* is experimental and not tested.\n\n**You will need pytest 4.4+ to run pytest-monitor.**\n\nWe support all versions of Python >= 3.6.\n\n\nFrom conda\n----------\n\nSimply run the following command to get it installed in your current environment\n\n.. code-block:: bash\n\n    conda install pytest-monitor -c https://conda.anaconda.org/conda-forge\n\n\nFrom pip\n--------\n\nSimply run the following command to get it installed\n\n.. code-block:: bash\n\n    pip install pytest-monitor\n"
  },
  {
    "path": "docs/sources/introduction.rst",
    "content": "============\nIntroduction\n============\n\n`pytest-monitor` tracks the resources (like memory and compute time) consumed by a test suite, so that you\ncan make sure that your code does not use too much of them.\n\nThanks to `pytest-monitor`, you can check resource consumption in particular through continuous integration, as this is done by monitoring the consumption of test functions. These tests can be functional (as usual) or be dedicated to the resource consumption checks.\n\nUse cases\n---------\n\nExamples of use cases include technical stack updates, and code evolutions.\n\nTechnical stack updates\n~~~~~~~~~~~~~~~~~~~~~~~\n\nIn the Python world, libraries often depends on several packages. By updating some (or all) of the dependencies,\nyou update code that you do not own and therefore do not control. Tracking your application's resource footprint\ncan prevent unwanted resource consumption, and can thus validate the versions of the packages that you depend on.\n\nCode evolution\n~~~~~~~~~~~~~~\n\nExtending your application with new features, or fixing its bugs, might have an impact on the core of your program. The performance of large applications or libraries can be difficult to assess, but by monitoring resource consumption, `pytest-monitor` allows you to check that despite code udpates, the performance of your code remains within desirable limits.\n\n\nUsage\n-----\n\nSimply run pytest as usual: pytest-monitor is active by default as soon as it is installed. After running your first session, a .pymon sqlite database will be accessible in the directory where pytest was run.\n"
  },
  {
    "path": "docs/sources/make.bat",
    "content": "@ECHO OFF\n\nREM Command file for Sphinx documentation\n\nif \"%SPHINXBUILD%\" == \"\" (\n\tset SPHINXBUILD=sphinx-build\n)\nset BUILDDIR=_build\nset ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .\nset I18NSPHINXOPTS=%SPHINXOPTS% .\nif NOT \"%PAPER%\" == \"\" (\n\tset ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%\n\tset I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%\n)\n\nif \"%1\" == \"\" goto help\n\nif \"%1\" == \"help\" (\n\t:help\n\techo.Please use `make ^<target^>` where ^<target^> is one of\n\techo.  html       to make standalone HTML files\n\techo.  dirhtml    to make HTML files named index.html in directories\n\techo.  singlehtml to make a single large HTML file\n\techo.  pickle     to make pickle files\n\techo.  json       to make JSON files\n\techo.  htmlhelp   to make HTML files and a HTML help project\n\techo.  qthelp     to make HTML files and a qthelp project\n\techo.  devhelp    to make HTML files and a Devhelp project\n\techo.  epub       to make an epub\n\techo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter\n\techo.  text       to make text files\n\techo.  man        to make manual pages\n\techo.  texinfo    to make Texinfo files\n\techo.  gettext    to make PO message catalogs\n\techo.  changes    to make an overview over all changed/added/deprecated items\n\techo.  xml        to make Docutils-native XML files\n\techo.  pseudoxml  to make pseudoxml-XML files for display purposes\n\techo.  linkcheck  to check all external links for integrity\n\techo.  doctest    to run all doctests embedded in the documentation if enabled\n\techo.  coverage   to run coverage check of the documentation if enabled\n\tgoto end\n)\n\nif \"%1\" == \"clean\" (\n\tfor /d %%i in (%BUILDDIR%\\*) do rmdir /q /s %%i\n\tdel /q /s %BUILDDIR%\\*\n\tgoto end\n)\n\n\nREM Check if sphinx-build is available and fallback to Python version if any\n%SPHINXBUILD% 2> nul\nif errorlevel 9009 goto sphinx_python\ngoto sphinx_ok\n\n:sphinx_python\n\nset SPHINXBUILD=python -m sphinx.__init__\n%SPHINXBUILD% 2> nul\nif errorlevel 9009 (\n\techo.\n\techo.The 'sphinx-build' command was not found. Make sure you have Sphinx\n\techo.installed, then set the SPHINXBUILD environment variable to point\n\techo.to the full path of the 'sphinx-build' executable. Alternatively you\n\techo.may add the Sphinx directory to PATH.\n\techo.\n\techo.If you don't have Sphinx installed, grab it from\n\techo.http://sphinx-doc.org/\n\texit /b 1\n)\n\n:sphinx_ok\n\n\nif \"%1\" == \"html\" (\n\t%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished. The HTML pages are in %BUILDDIR%/html.\n\tgoto end\n)\n\nif \"%1\" == \"dirhtml\" (\n\t%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.\n\tgoto end\n)\n\nif \"%1\" == \"singlehtml\" (\n\t%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished. 
The HTML pages are in %BUILDDIR%/singlehtml.\n\tgoto end\n)\n\nif \"%1\" == \"pickle\" (\n\t%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished; now you can process the pickle files.\n\tgoto end\n)\n\nif \"%1\" == \"json\" (\n\t%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished; now you can process the JSON files.\n\tgoto end\n)\n\nif \"%1\" == \"htmlhelp\" (\n\t%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished; now you can run HTML Help Workshop with the ^\n.hhp project file in %BUILDDIR%/htmlhelp.\n\tgoto end\n)\n\nif \"%1\" == \"qthelp\" (\n\t%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished; now you can run \"qcollectiongenerator\" with the ^\n.qhcp project file in %BUILDDIR%/qthelp, like this:\n\techo.^> qcollectiongenerator %BUILDDIR%\\qthelp\\pytest-cookiecutterplugin_name.qhcp\n\techo.To view the help file:\n\techo.^> assistant -collectionFile %BUILDDIR%\\qthelp\\pytest-cookiecutterplugin_name.ghc\n\tgoto end\n)\n\nif \"%1\" == \"devhelp\" (\n\t%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished.\n\tgoto end\n)\n\nif \"%1\" == \"epub\" (\n\t%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished. The epub file is in %BUILDDIR%/epub.\n\tgoto end\n)\n\nif \"%1\" == \"latex\" (\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished; the LaTeX files are in %BUILDDIR%/latex.\n\tgoto end\n)\n\nif \"%1\" == \"latexpdf\" (\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\n\tcd %BUILDDIR%/latex\n\tmake all-pdf\n\tcd %~dp0\n\techo.\n\techo.Build finished; the PDF files are in %BUILDDIR%/latex.\n\tgoto end\n)\n\nif \"%1\" == \"latexpdfja\" (\n\t%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex\n\tcd %BUILDDIR%/latex\n\tmake all-pdf-ja\n\tcd %~dp0\n\techo.\n\techo.Build finished; the PDF files are in %BUILDDIR%/latex.\n\tgoto end\n)\n\nif \"%1\" == \"text\" (\n\t%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished. The text files are in %BUILDDIR%/text.\n\tgoto end\n)\n\nif \"%1\" == \"man\" (\n\t%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished. The manual pages are in %BUILDDIR%/man.\n\tgoto end\n)\n\nif \"%1\" == \"texinfo\" (\n\t%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.\n\tgoto end\n)\n\nif \"%1\" == \"gettext\" (\n\t%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished. 
The message catalogs are in %BUILDDIR%/locale.\n\tgoto end\n)\n\nif \"%1\" == \"changes\" (\n\t%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.The overview file is in %BUILDDIR%/changes.\n\tgoto end\n)\n\nif \"%1\" == \"linkcheck\" (\n\t%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Link check complete; look for any errors in the above output ^\nor in %BUILDDIR%/linkcheck/output.txt.\n\tgoto end\n)\n\nif \"%1\" == \"doctest\" (\n\t%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Testing of doctests in the sources finished, look at the ^\nresults in %BUILDDIR%/doctest/output.txt.\n\tgoto end\n)\n\nif \"%1\" == \"coverage\" (\n\t%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Testing of coverage in the sources finished, look at the ^\nresults in %BUILDDIR%/coverage/python.txt.\n\tgoto end\n)\n\nif \"%1\" == \"xml\" (\n\t%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished. The XML files are in %BUILDDIR%/xml.\n\tgoto end\n)\n\nif \"%1\" == \"pseudoxml\" (\n\t%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml\n\tif errorlevel 1 exit /b 1\n\techo.\n\techo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.\n\tgoto end\n)\n\n:end\n"
  },
  {
    "path": "docs/sources/operating.rst",
    "content": "==================\nOperating measures\n==================\n\nStorage\n-------\n\nOnce measures are collected, `pytest-monitor` dumps them either in a local database\nor sends them to a monitor server.\n\nIn the case of local storage, a `sqlite3` database is used, as it is lightweight and\nis provided with many Python distributions (being part of the standard library).\n\nMeasures are stored in the `pytest` invocation directory, in a database file named **.pymon**.\nYou are free to override the name of this database by setting the `--db` option:\n\n.. code-block:: shell\n\n    pytest --db /path/to/your/monitor/database\n\n\nYou can also sends your tests result to a monitor server (under development at that time) in order to centralize\nyour Metrics and Execution Context (see below):\n\n.. code-block:: shell\n\n    pytest --remote-server server:port\n\nExecution Context, Metrics and Session\n--------------------------------------\n\nWe distinguish two kinds of measures:\n\n    - those related to the **Execution Context**. This is related to your machine (node name, CPU, memory…),\n    - the **Metrics** related to the tests themselves (this can be the memory used, the CPU usage…).\n\nRegarding tests related **metrics**, one can see metrics which are tests independent and those which\nare session independent (session start date, scm reference). For this reason, `pytest-monitor` uses\na notion of session metrics to which each tests are linked to.\n\nAdditionally, each test is linked to an Execution Context so that comparisons between runs is possible.\n\n\nModel\n-----\n\nThe local database associates each test Metrics to the specific context in which it was run:\n\n.. image:: _static/db_relationship.png\n\n\nExecution Context\n~~~~~~~~~~~~~~~~~\n\nExecution Contexts are computed prior to the start of the `pytest`\nsession. An Execution Context describes much of the machine settings:\n\nCPU_COUNT (integer)\n    Number of online CPUs the machine can use.\nCPU_FREQUENCY_MHZ (integer)\n    Base frequency of the CPUs (in megahertz). Set to 0 if unable to fetch it.\nCPU_VENDOR (TEXT 256 CHAR)\n    Full CPU vendor string.\nRAM_TOTAL_MB (INTEGER)\n    Total usable RAM (physical memory) in megabytes.\nMACHINE_NODE (TEXT 512 CHAR)\n    Fully qualified domain name of the machine.\nMACHINE_TYPE (TEXT 32 CHAR)\n    Machine type.\nMACHINE_ARCH (TEXT 16 CHAR)\n    Mode used (64 bits…).\nSYSTEM_INFO (TEXT 256 CHAR)\n    Operating system name and release level.\nPYTHON_INFO (TEXT 512 CHAR)\n    Python information (version, compilation mode used and so on…)\nENV_H (TEXT 64 CHAR)\n    Hash string used to uniquely identify an execution context.\n\nIn the local database, Execution Contexts are stored in table `EXECUTION_CONTEXTS`.\n\n\nSessions\n--------\nSESSION_H (TEXT 64 CHAR)\n    Hash string used to uniquely identify a session run.\nRUN_DATE (TEXT 64 CHAR)\n    Time at which the `pytest` session was started. The full format is\n    'YYYY-MM-DDTHH:MM:SS.uuuuuu' (ISO 8601 format with UTC time). The fractional second part is omitted if it is zero.\nSCM_ID (TEXT 128 CHAR)\n    Full reference to the source code management system if any.\nRUN_DESCRIPTION (TEXT 1024 CHAR)\n    A free text field that you can use to describe a session run.\n\nIn the local database, Sessions are stored under the table `TEST_SESSIONS`.\n\n\nMetrics\n~~~~~~~\n\nMetrics are collected at test, class and/or module level. 
\nMetrics are collected at test, class and/or module level. For both classes and modules, some of the\nmetrics can be skewed due to the technical limitations described earlier.\n\nSESSION_H (TEXT 64 CHAR)\n    Session context used for this test.\nENV_H (TEXT 64 CHAR)\n    Execution Context used for this test.\nITEM_START_TIME (TEXT 64 CHAR)\n    Time at which the test item was launched. The full format is\n    'YYYY-MM-DDTHH:MM:SS.uuuuuu' (ISO 8601 format with UTC time). The fractional second part is omitted if it is zero.\nITEM_PATH (TEXT 4096 CHAR)\n    Path of the item, using an import compatible string specification.\nITEM (TEXT 2048 CHAR)\n    Initial item name, without any variant.\nITEM_VARIANT (TEXT 2048 CHAR)\n    Full item name, with the parametrization used, if any.\nITEM_FS_LOC (TEXT 2048 CHAR)\n    Item's module path relative to the pytest invocation directory.\nKIND (TEXT 64 CHAR)\n    Type of item (function, class, module…).\nCOMPONENT (TEXT 512 CHAR), NULLABLE\n    Component to which the test belongs, if any (this is used when sending results to a server, for identifying each source of Metrics).\nTOTAL_TIME (FLOAT)\n    Total time spent running the item (in seconds).\nUSER_TIME (FLOAT)\n    Time spent in User mode (in seconds).\nKERNEL_TIME (FLOAT)\n    Time spent in Kernel mode (in seconds).\nCPU_USAGE (FLOAT)\n    System-wide CPU usage as a percentage (100 % is equivalent to one core).\nMEM_USAGE (FLOAT)\n    Maximum resident memory used during the test execution (in megabytes).\n\nIn the local database, these Metrics are stored in the table `TEST_METRICS`.\n
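\nAs an illustration, here is one way to list the ten most time-consuming monitored items (a sketch assuming the default **.pymon** database and the `sqlite3` command-line client):\n\n.. code-block:: shell\n\n    sqlite3 .pymon \"SELECT ITEM, KIND, TOTAL_TIME, MEM_USAGE FROM TEST_METRICS ORDER BY TOTAL_TIME DESC LIMIT 10;\"\n"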
  },
  {
    "path": "docs/sources/remote.rst",
    "content": "Use of a remote server\n======================\n\nYou can easily send your metrics to a remote server. This can turn usefull when it comes to running\ntests in parallel with plugins such as *pytest-xdist* of *pytest-parallel*.\nTo do so, instruct pytest with the remote server address to use:\n\n.. code-block:: shell\n\n   bash $> pytest --remote-server myremote.server.net:port \n\nThis way, *pytest-monitor* will automatically send and query the remote server as soon as it gets\na need.  Note that *pytest-monitor* will revert to a normal behaviour if:\n\n- it cannot query the context or the session for existence\n- it cannot create a new context or a new session\n\n\nImplementing a remote server\n============================\n\nHow pytest-monitor interacts with a remote server\n-------------------------------------------------\n\nThe following sequence is used by *pytest-monitor* when using a remote server:\n\n1. Ask the remote server if the **Execution Context** is known.\n2. Insert the **Execution Context** if the server knows nothing about it.\n3. Ask the remote server if the **Session** is known.\n4. Insert the **Session** if the server knows nothing about it.\n5. Insert results once measures have been collected.\n\nUsed HTTP codes\n---------------\nTwo codes are used by *pytest-monitor* when asked to work with a remote server:\n\n- 200 (OK) is used to indicate that a query has led to a non-empty result.\n- 201 (CREATED) is expected by *pytest-monitor** when sending a new entry (**Execution Context**, **Session** or any **Metric**).\n- 204 (NO CONTENT) though not checked explicitely should be returned when a request leads to no results.\n\nMandatory routes\n----------------\nThe following routes are expected to be reachable:\n\nGET /contexts/<str:hash>\n\n    Query the system for a **Execution Context** with the given hash.\n\n    **Return Codes**: Must return *200* (*OK*) if the **Execution Context** exists, *204* (*NO CONTENT*) otherwise\n\nGET /sessions/<str:hash>\n\n    Query the system for a **Session** with the given hash.\n    \n    **Return Codes**: Must return *200* (*OK*) if the **Session** exists, *204* (*NO CONTENT*) otherwise\n\nPOST /contexts/\n\n    Request the system to create a new entry for the given **Execution Context**.\n    Data are sent using Json parameters:\n\n    .. code-block:: json\n\n        {\n            cpu_count: int, \n            cpu_frequency: int, \n            cpu_type: str, \n            cpu_vendor: str, \n            ram_tota: int,\n            machine_node: str, \n            machine_type: str, \n            machine_arch: str, \n            system_info: str, \n            python_info: str, \n            h: str\n        }\n\n    **Return Codes**: Must return *201* (*CREATED*) if the **Execution Context** has been created\n\n\nPOST /sessions/\n\n    Request the system to create a new entry for the given **Session**.\n    Data are sent using Json parameters:\n\n    .. code-block:: json\n       \n       {\n           session_h: str,\n           run_date: str,\n           scm_ref: str,\n           description: str\n       }\n\n    **Return Codes**: Must return *201* (*CREATED*) if the **Session** has been created\n\nPOST /metrics/\n\n    Request the system to create a new **Metrics** entry. \n    Data are sent using Json parameters:\n\n    .. 
\n    .. code-block:: json\n\n        {\n            session_h: str,\n            context_h: str,\n            item_start_time: str,\n            item_path: str,\n            item: str,\n            item_variant: str,\n            item_fs_loc: str,\n            kind: str,\n            component: str,\n            total_time: float,\n            user_time: float,\n            kernel_time: float,\n            cpu_usage: float,\n            mem_usage: float\n        }\n\n    **Return Codes**: Must return *201* (*CREATED*) if the **Metrics** entry has been created\n
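\nExample skeleton\n----------------\n\nBelow is a minimal sketch of such a server, using Flask with in-memory storage (Flask is an assumption of this example, not something *pytest-monitor* requires). It only mirrors the routes and status codes described above, including the response shapes *pytest-monitor* reads back (a ``contexts`` list when querying a context, an ``h`` field when creating one):\n\n.. code-block:: python\n\n    # Illustrative only: everything is kept in plain dictionaries/lists.\n    from flask import Flask, jsonify, request\n\n    app = Flask(__name__)\n    contexts, sessions, metrics = {}, {}, []\n\n    @app.route(\"/contexts/<string:h>\")\n    def get_context(h):\n        # 200 with the matching contexts when known, 204 otherwise\n        if h in contexts:\n            return jsonify(contexts=[contexts[h]]), 200\n        return \"\", 204\n\n    @app.route(\"/sessions/<string:h>\")\n    def get_session(h):\n        if h in sessions:\n            return jsonify(sessions=[sessions[h]]), 200\n        return \"\", 204\n\n    @app.route(\"/contexts/\", methods=[\"POST\"])\n    def post_context():\n        data = request.get_json()\n        contexts[data[\"h\"]] = data\n        return jsonify(h=data[\"h\"]), 201\n\n    @app.route(\"/sessions/\", methods=[\"POST\"])\n    def post_session():\n        data = request.get_json()\n        sessions[data[\"session_h\"]] = data\n        return jsonify(session_h=data[\"session_h\"]), 201\n\n    @app.route(\"/metrics/\", methods=[\"POST\"])\n    def post_metrics():\n        metrics.append(request.get_json())\n        return \"\", 201\n"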
  },
  {
    "path": "docs/sources/run.rst",
    "content": "========================\nManaging your test suite\n========================\n\n`pytest-monitor` does not require any specific setup: it is active by default.\nThus all your tests are by default analyzed in order to collect monitored information.\n\n\nAbout collecting and storing results\n------------------------------------\n\n`pytest-monitor` makes a clear distinction between the execution context and the test metrics.\nThis distinction can been seen clearly in the code and the initialization sequence:\n\n1. Collect environment values.\n   Various pieces of information about the machine are collected.\n2. Store the context.\n   The Execution Context collected in step #1 is recorded if not yet known.\n3. Prepare the run.\n   In order to provide more accurate measurements, we \"warm up\" the context and take an initial set of measurements.\n   Some will be used for adjusting later measurements.\n4. Run tests and enable measurements.\n   Depending on the item type (function, class or module), we launch the relevant measurements.\n   Each time a monitored item ends, the measurement results (Metrics) are recorded right away.\n5. End session.\n   If sending the monitoring results to a remote server has been requested, this is when `pytest-monitor` does it.\n\n\nSelecting tests to monitor\n--------------------------\n\nBy default, all tests are monitored, even small ones which would not require any specific monitoring.\nIt is possible to control more finely which tests will be monitored by `pytest-monitor`. This is done through the use of `pytest` markers.\n\n`pytest-monitor` offers two markers for this:\n\n``@pytest.mark.monitor_skip_test``\n  marks your test for execution, but without any monitoring.\n\n``@pytest.mark.monitor_skip_test_if(cond)``\n  tells `pytest-monitor` to execute the test but to monitor results\n  if and only if the condition is true.\n\nHere is an example:\n\n.. code-block:: python\n\n    import pytest\n    import sys\n\n\n    def test_execute_and_monitor():\n        assert True\n\n    @pytest.mark.monitor_skip_test\n    def test_execute_do_not_monitor():\n        assert True\n\n    @pytest.mark.monitor_skip_test_if(sys.version_info >= (3,))\n    def test_execute_and_monitor_py3_or_above():\n        assert True\n\n\nDisabling monitoring except for some tests\n------------------------------------------\n\n`pytest` offers global markers. For example, one can set the default to no monitoring:\n\n.. code-block:: python\n\n    import pytest\n\n    # With the following global module marker,\n    # monitoring is disabled by default:\n    pytestmark = [pytest.mark.monitor_skip_test]\n\nIn this case, it is necessary to explicitly activate individual monitoring. This is\naccomplished with:\n\n``@pytest.mark.monitor_test``\n  marks your test as to be executed and monitored, even if monitoring\n  is disabled for the module.\n\n``@pytest.mark.monitor_test_if(cond)``\n  tells `pytest-monitor` to execute the test and to monitor results\n  if and only if the condition is true, regardless of the\n  module monitor setup.\n\n\nContinuing the example above:\n\n.. 
\n.. code-block:: python\n\n    import sys\n    import time\n\n    import pytest\n\n\n    def test_executed_not_monitored():\n        time.sleep(1)\n        assert True\n\n    def test_executed_not_monitored_2():\n        time.sleep(2)\n        assert True\n\n    @pytest.mark.monitor_test\n    def test_executed_and_monitored():\n        assert True\n\n    @pytest.mark.monitor_test_if(sys.version_info >= (3, 7))\n    def test_executed_and_monitored_if_py37():\n        assert True\n\n\nAssociating your tests to a component\n-------------------------------------\n\n`pytest-monitor` allows you to *tag* each test in the database with a \"**component**\" name. This allows you to easily identify tests that come from a specific part of your application, or to distinguish test results for two different projects that use the same `pytest-monitor` database.\n\nSetting up a component name can be done at module level:\n\n.. code-block:: python\n\n    import time\n    import pytest\n\n\n    pytest_monitor_component = \"my_component\"  # Component name stored in the results database\n\n    def test_monitored():\n        t_a = time.time()\n        b_continue = True\n        while b_continue:\n            t_delta = time.time() - t_a\n            b_continue = t_delta < 1\n        assert not b_continue\n\nIf no `pytest_monitor_component` variable is defined, the component is set to the empty string.\nIn projects with many modules, this can be tedious. `pytest-monitor` therefore allows you to force a fixed component name for all the tests:\n\n.. code-block:: bash\n\n   $ pytest --force-component YOUR_COMPONENT_NAME\n\nThis will force the component value to be set to the one you provided, whatever the value of\n*pytest_monitor_component* in your test module, if any.\n\nIf you need to use a global component name for all your tests while allowing some modules to have a specific component name, you can ask `pytest-monitor` to add a prefix to any module-level component name:\n\n.. code-block:: bash\n\n   $ pytest --component-prefix YOUR_COMPONENT_NAME\n\nThis way, all tests detected by `pytest` will have their component prefixed with the given value (tests for modules with no `pytest_monitor_component` variable are simply tagged with the prefix).\n\nFor instance, the following test module:\n\n.. code-block:: python\n\n    import time\n    import pytest\n\n\n    pytest_monitor_component = \"component_A\"\n\n    def test_monitored():\n        t_a = time.time()\n        b_continue = True\n        while b_continue:\n            t_delta = time.time() - t_a\n            b_continue = t_delta < 1\n        assert not b_continue\n\nwill yield the following value for the component field, depending on the chosen command-line option:\n\n+------------------------------------------+-----------------------+\n|   Command line used                      |    Component value    |\n+==========================================+=======================+\n| pytest --force-component PROJECT_A       |       PROJECT_A       |\n+------------------------------------------+-----------------------+\n| pytest --component-prefix PROJECT_A      | PROJECT_A.component_A |\n+------------------------------------------+-----------------------+\n\n"
  },
  {
    "path": "examples/pkg1/__init__.py",
    "content": ""
  },
  {
    "path": "examples/pkg1/test_mod1.py",
    "content": "import time\n\nimport pytest\n\n\ndef test_sleep1():\n    time.sleep(1)\n\n\n@pytest.mark.monitor_skip_test()\ndef test_sleep2():\n    time.sleep(2)\n\n\n@pytest.mark.parametrize((\"range_max\", \"other\"), [(10, \"10\"), (100, \"100\"), (1000, \"1000\"), (10000, \"10000\")])\ndef test_heavy(range_max, other):\n    assert len([\"a\" * i for i in range(range_max)]) == range_max\n"
  },
  {
    "path": "examples/pkg1/test_mod2.py",
    "content": "import time\n\n\ndef test_sleep_400ms():\n    time.sleep(0.4)\n"
  },
  {
    "path": "examples/pkg2/__init__.py",
    "content": ""
  },
  {
    "path": "examples/pkg2/test_mod_a.py",
    "content": "import time\n\n\ndef test_master_sleep():\n    t_a = time.time()\n    b_continue = True\n    while b_continue:\n        t_delta = time.time() - t_a\n        b_continue = t_delta < 5\n"
  },
  {
    "path": "examples/pkg3/__init__.py",
    "content": ""
  },
  {
    "path": "examples/pkg3/test_mod_cl.py",
    "content": "import time\n\n\nclass TestClass:\n    def setup_method(self, test_method):\n        self.__value = test_method.__name__\n        time.sleep(1)\n\n    def test_method1(self):\n        time.sleep(0.5)\n        assert self.__value == \"test_method1\"\n"
  },
  {
    "path": "examples/pkg4/__init__.py",
    "content": ""
  },
  {
    "path": "examples/pkg4/test_mod_a.py",
    "content": "import time\n\nimport pytest\n\npytestmark = pytest.mark.monitor_skip_test\n\npytest_monitor_component = \"test\"\n\n\ndef test_not_monitored():\n    t_a = time.time()\n    b_continue = True\n    while b_continue:\n        t_delta = time.time() - t_a\n        b_continue = t_delta < 5\n\n\n@pytest.mark.monitor_test()\ndef test_force_monitor():\n    t_a = time.time()\n    b_continue = True\n    while b_continue:\n        t_delta = time.time() - t_a\n        b_continue = t_delta < 5\n"
  },
  {
    "path": "examples/pkg5/__init__.py",
    "content": ""
  },
  {
    "path": "examples/pkg5/doctest.py",
    "content": "def run(a, b):\n    \"\"\"\n    >>> a = 3\n    >>> b = 30\n    >>> run(a, b)\n    33\n    \"\"\"\n    return a + b\n\n\ndef try_doctest():\n    \"\"\"\n    >>> try_doctest()\n    33\n    \"\"\"\n    return run(3, 30)\n"
  },
  {
    "path": "examples/pkg5/test_special_pytest.py",
    "content": "import pytest\n\n\n@pytest.mark.skip(reason=\"Some special test to skip\")\ndef test_is_skipped():\n    assert True\n\n\ndef test_that_one_is_skipped_too():\n    pytest.skip(\"Test executed and instructed to be skipped from its body\")\n\n\ndef test_import_or_skip():\n    pytest.importorskip(\"this_module_does_not_exists\")\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nrequires = [\"setuptools\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[tool.distutils.bdist_wheel]\nuniversal = false\n\n[project]\nname = \"pytest-monitor\"\nauthors = [\n    {name = \"Jean-Sébastien Dieu\", email = \"dieu.jsebastien@yahoo.com\"},\n]\nclassifiers = [\n    \"Development Status :: 5 - Production/Stable\",\n    \"Framework :: Pytest\",\n    \"Intended Audience :: Developers\",\n    \"Topic :: Software Development :: Testing\",\n    \"Programming Language :: Python\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3.8\",\n    \"Programming Language :: Python :: 3.9\",\n    \"Programming Language :: Python :: 3.10\",\n    \"Programming Language :: Python :: 3.11\",\n    \"Programming Language :: Python :: Implementation :: CPython\",\n    \"Programming Language :: Python :: Implementation :: PyPy\",\n    \"Operating System :: OS Independent\",\n    \"License :: OSI Approved :: MIT License\",\n]\ndependencies = [\n    \"pytest\",\n    \"requests\",\n    \"psutil>=5.1.0\",\n    \"memory_profiler>=0.58\",\n    \"wheel\",\n]\ndescription = \"A pytest plugin designed for analyzing resource usage during tests.\"\nlicense = {text = \"MIT\"}\nmaintainers = [\n    {name = \"Jean-Sébastien Dieu\", email = \"dieu.jsebastien@yahoo.com\"},\n]\nreadme = \"README.rst\"\nrequires-python = \">=3.8\"\nversion = \"1.7.0\"\n\n[project.urls]\n\"Source\" = \"https://github.com/CFMTech/pytest-monitor\"\n\"Tracker\" = \"https://github.com/CFMTech/pytest-monitor/issues\"\n\"Documentation\" = \"https://pytest-monitor.readthedocs.io/\"\n\"Homepage\" = \"https://pytest-monitor.readthedocs.io/\"\n\n[project.entry-points.pytest11]\nmonitor = \"pytest_monitor.pytest_monitor\"\n\n[project.optional-dependencies]\ndev = [\n    \"black\",\n    \"isort\",\n    \"flake8==6.0.0\",\n    \"flake8-builtins==2.1.0\",\n    \"flake8-simplify==0.19.3\",\n    \"flake8-comprehensions==3.10.1\",\n    \"flake8-pytest-style==1.6.0\",\n    \"flake8-return==1.2.0\",\n    \"flake8-simplify==0.19.3\",\n    \"flake8-pyproject==1.2.3\",\n    \"pre-commit==3.3.3\"\n]\n\n[tool.flake8]\nmax-line-length = 120\n\n[tool.black]\nline-length = 120\n\n[tool.isort]\nprofile = \"black\"\nsrc_paths = [\"pytest_monitor\"]\n"
  },
  {
    "path": "pytest_monitor/__init__.py",
    "content": "import importlib.metadata\n\n__version__ = importlib.metadata.version(\"pytest-monitor\")\n"
  },
  {
    "path": "pytest_monitor/handler.py",
    "content": "import sqlite3\n\n\nclass DBHandler:\n    def __init__(self, db_path):\n        self.__db = db_path\n        self.__cnx = sqlite3.connect(self.__db) if db_path else None\n        self.prepare()\n\n    def query(self, what, bind_to, many=False):\n        cursor = self.__cnx.cursor()\n        cursor.execute(what, bind_to)\n        return cursor.fetchall() if many else cursor.fetchone()\n\n    def insert_session(self, h, run_date, scm_id, description):\n        with self.__cnx:\n            self.__cnx.execute(\n                \"insert into TEST_SESSIONS(SESSION_H, RUN_DATE, SCM_ID, RUN_DESCRIPTION)\" \" values (?,?,?,?)\",\n                (h, run_date, scm_id, description),\n            )\n\n    def insert_metric(\n        self,\n        session_id,\n        env_id,\n        item_start_date,\n        item,\n        item_path,\n        item_variant,\n        item_loc,\n        kind,\n        component,\n        total_time,\n        user_time,\n        kernel_time,\n        cpu_usage,\n        mem_usage,\n    ):\n        with self.__cnx:\n            self.__cnx.execute(\n                \"insert into TEST_METRICS(SESSION_H,ENV_H,ITEM_START_TIME,ITEM,\"\n                \"ITEM_PATH,ITEM_VARIANT,ITEM_FS_LOC,KIND,COMPONENT,TOTAL_TIME,\"\n                \"USER_TIME,KERNEL_TIME,CPU_USAGE,MEM_USAGE) \"\n                \"values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)\",\n                (\n                    session_id,\n                    env_id,\n                    item_start_date,\n                    item,\n                    item_path,\n                    item_variant,\n                    item_loc,\n                    kind,\n                    component,\n                    total_time,\n                    user_time,\n                    kernel_time,\n                    cpu_usage,\n                    mem_usage,\n                ),\n            )\n\n    def insert_execution_context(self, exc_context):\n        with self.__cnx:\n            self.__cnx.execute(\n                \"insert into EXECUTION_CONTEXTS(CPU_COUNT,CPU_FREQUENCY_MHZ,CPU_TYPE,CPU_VENDOR,\"\n                \"RAM_TOTAL_MB,MACHINE_NODE,MACHINE_TYPE,MACHINE_ARCH,SYSTEM_INFO,\"\n                \"PYTHON_INFO,ENV_H) values (?,?,?,?,?,?,?,?,?,?,?)\",\n                (\n                    exc_context.cpu_count,\n                    exc_context.cpu_frequency,\n                    exc_context.cpu_type,\n                    exc_context.cpu_vendor,\n                    exc_context.ram_total,\n                    exc_context.fqdn,\n                    exc_context.machine,\n                    exc_context.architecture,\n                    exc_context.system_info,\n                    exc_context.python_info,\n                    exc_context.compute_hash(),\n                ),\n            )\n\n    def prepare(self):\n        cursor = self.__cnx.cursor()\n        cursor.execute(\n            \"\"\"\nCREATE TABLE IF NOT EXISTS TEST_SESSIONS(\n    SESSION_H varchar(64) primary key not null unique, -- Session identifier\n    RUN_DATE varchar(64), -- Date of test run\n    SCM_ID varchar(128), -- SCM change id\n    RUN_DESCRIPTION json\n);\"\"\"\n        )\n        cursor.execute(\n            \"\"\"\nCREATE TABLE IF NOT EXISTS TEST_METRICS (\n    SESSION_H varchar(64), -- Session identifier\n    ENV_H varchar(64), -- Environment description identifier\n    ITEM_START_TIME varchar(64), -- Effective start time of the test\n    ITEM_PATH varchar(4096), -- Path of the item, following Python import specification\n    ITEM 
varchar(2048), -- Name of the item\n    ITEM_VARIANT varchar(2048), -- Optional parametrization of an item.\n    ITEM_FS_LOC varchar(2048), -- Relative path from pytest invocation directory to the item's module.\n    KIND varchar(64), -- Package, Module or function\n    COMPONENT varchar(512) NULL, -- Tested component if any\n    TOTAL_TIME float, -- Total time spent running the item\n    USER_TIME float, -- time spent in user space\n    KERNEL_TIME float, -- time spent in kernel space\n    CPU_USAGE float, -- cpu usage\n    MEM_USAGE float, -- Max resident memory used.\n    FOREIGN KEY (ENV_H) REFERENCES EXECUTION_CONTEXTS(ENV_H),\n    FOREIGN KEY (SESSION_H) REFERENCES TEST_SESSIONS(SESSION_H)\n);\"\"\"\n        )\n        cursor.execute(\n            \"\"\"\nCREATE TABLE IF NOT EXISTS EXECUTION_CONTEXTS (\n   ENV_H varchar(64) primary key not null unique,\n   CPU_COUNT integer,\n   CPU_FREQUENCY_MHZ integer,\n   CPU_TYPE varchar(64),\n   CPU_VENDOR varchar(256),\n   RAM_TOTAL_MB integer,\n   MACHINE_NODE varchar(512),\n   MACHINE_TYPE varchar(32),\n   MACHINE_ARCH varchar(16),\n   SYSTEM_INFO varchar(256),\n   PYTHON_INFO varchar(512)\n);\n\"\"\"\n        )\n        self.__cnx.commit()\n"
  },
  {
    "path": "pytest_monitor/pytest_monitor.py",
    "content": "# -*- coding: utf-8 -*-\nimport gc\nimport time\nimport warnings\n\nimport memory_profiler\nimport pytest\n\nfrom pytest_monitor.session import PyTestMonitorSession\n\n# These dictionaries are used to compute members set on each items.\n# KEY is the marker set on a test function\n# value is a tuple:\n#  expect_args: boolean\n#  internal marker attribute name: str\n#  callable that set member's value\n#  default value\nPYTEST_MONITOR_VALID_MARKERS = {\n    \"monitor_skip_test\": (False, \"monitor_skip_test\", lambda x: True, False),\n    \"monitor_skip_test_if\": (True, \"monitor_skip_test\", lambda x: bool(x), False),\n    \"monitor_test\": (False, \"monitor_force_test\", lambda x: True, False),\n    \"monitor_test_if\": (True, \"monitor_force_test\", lambda x: bool(x), False),\n}\nPYTEST_MONITOR_DEPRECATED_MARKERS = {}\nPYTEST_MONITOR_ITEM_LOC_MEMBER = \"_location\" if tuple(pytest.__version__.split(\".\")) < (\"5\", \"3\") else \"location\"\n\nPYTEST_MONITORING_ENABLED = True\n\n\ndef pytest_addoption(parser):\n    group = parser.getgroup(\"monitor\")\n    group.addoption(\n        \"--restrict-scope-to\",\n        dest=\"mtr_scope\",\n        default=\"function\",\n        help=\"Select the scope to monitor. By default, only function is monitored.\"\n        \"Values are function, class, module, session. You can set one or more of these\"\n        \"by listing them using a comma separated list\",\n    )\n    group.addoption(\n        \"--parametrization-explicit\",\n        dest=\"mtr_want_explicit_ids\",\n        action=\"store_true\",\n        help=\"Set this option to distinguish parametrized tests given their values.\"\n        \" This requires the parameters to be stringifiable.\",\n    )\n    group.addoption(\"--no-monitor\", action=\"store_true\", dest=\"mtr_none\", help=\"Disable all traces\")\n    group.addoption(\n        \"--remote-server\",\n        action=\"store\",\n        dest=\"mtr_remote\",\n        help=\"Remote server to send the results to. Format is <ADRESS>:<PORT>\",\n    )\n    group.addoption(\n        \"--db\",\n        action=\"store\",\n        dest=\"mtr_db_out\",\n        default=\".pymon\",\n        help=\"Use the given sqlite database for storing results.\",\n    )\n    group.addoption(\n        \"--no-db\",\n        action=\"store_true\",\n        dest=\"mtr_no_db\",\n        help=\"Do not store results in local db.\",\n    )\n    group.addoption(\n        \"--force-component\",\n        action=\"store\",\n        dest=\"mtr_force_component\",\n        help=\"Force the component to be set at the given value for the all tests run\" \" in this session.\",\n    )\n    group.addoption(\n        \"--component-prefix\",\n        action=\"store\",\n        dest=\"mtr_component_prefix\",\n        help=\"Prefix each found components with the given value (applies to all tests\" \" run in this session).\",\n    )\n    group.addoption(\n        \"--no-gc\",\n        action=\"store_true\",\n        dest=\"mtr_disable_gc\",\n        help=\"Disable garbage collection between tests (may leads to non reliable measures)\",\n    )\n    group.addoption(\n        \"--description\",\n        action=\"store\",\n        default=\"\",\n        dest=\"mtr_description\",\n        help=\"Use this option to provide a small summary about this run.\",\n    )\n    group.addoption(\n        \"--tag\",\n        action=\"append\",\n        dest=\"mtr_tags\",\n        default=[],\n        help=\"Provide meaningfull flags to your run. 
This can help you in your analysis.\",\n    )\n\n\ndef pytest_configure(config):\n    config.addinivalue_line(\"markers\", \"monitor_skip_test: mark test to be executed but not monitored.\")\n    config.addinivalue_line(\n        \"markers\",\n        \"monitor_skip_test_if(cond): mark test to be executed but \" \"not monitored if cond is verified.\",\n    )\n    config.addinivalue_line(\n        \"markers\",\n        \"monitor_test: mark test to be monitored (default behaviour).\"\n        \" This can come in handy to whitelist some tests when you have disabled\"\n        \" monitoring on a whole module.\",\n    )\n    config.addinivalue_line(\n        \"markers\",\n        \"monitor_test_if(cond): mark test to be monitored if and only if cond\"\n        \" is verified. This can help you in whitelisting tests to be monitored\"\n        \" depending on some external conditions.\",\n    )\n\n\ndef pytest_runtest_setup(item):\n    \"\"\"\n    Validate marker setup and print warnings if usage of a deprecated marker is identified.\n    Marker attributes are set on the discovered item once the above verification is done.\n    :param item: Test item\n    \"\"\"\n    if not PYTEST_MONITORING_ENABLED:\n        return\n    item_markers = {mark.name: mark for mark in item.iter_markers() if mark and mark.name.startswith(\"monitor_\")}\n    mark_to_del = []\n    for set_marker in item_markers.keys():\n        if set_marker not in PYTEST_MONITOR_VALID_MARKERS:\n            warnings.warn(\"Nothing known about marker {}. Marker will be dropped.\".format(set_marker))\n            mark_to_del.append(set_marker)\n        if set_marker in PYTEST_MONITOR_DEPRECATED_MARKERS:\n            warnings.warn(f\"Marker {set_marker} is deprecated. Consider upgrading your tests\")\n\n    for marker in mark_to_del:\n        del item_markers[marker]\n\n    all_valid_markers = PYTEST_MONITOR_VALID_MARKERS\n    all_valid_markers.update(PYTEST_MONITOR_DEPRECATED_MARKERS)\n    # Setting instantiated markers\n    for marker, _ in item_markers.items():\n        with_args, attr, fun_val, _ = all_valid_markers[marker]\n        attr_val = fun_val(item_markers[marker].args[0]) if with_args else fun_val(None)\n        setattr(item, attr, attr_val)\n\n    # Setting other markers to default values\n    for marker, marker_value in all_valid_markers.items():\n        with_args, attr, _, default = marker_value\n        if not hasattr(item, attr):\n            setattr(item, attr, default)\n\n    # Finalize marker processing by enforcing some marker's value\n    if item.monitor_force_test:\n        # This test has been explicitly flagged as 'to be monitored'.\n        item.monitor_skip_test = False\n\n\n@pytest.hookimpl(tryfirst=True, hookwrapper=True)\ndef pytest_runtest_makereport(item, call):\n    \"\"\"\n    Used to identify the current call to add times.\n    :param item: Test item\n    :param call: call instance associated to the given item\n    \"\"\"\n    outcome = yield\n    rep = outcome.get_result()\n\n    if rep.when == \"call\":\n        setattr(item, \"test_run_duration\", call.stop - call.start)\n        setattr(item, \"test_effective_start_time\", call.start)\n\n\ndef pytest_runtest_call(item):\n    if not PYTEST_MONITORING_ENABLED:\n        return\n    setattr(item, \"monitor_results\", False)\n    if hasattr(item, \"module\"):\n        setattr(\n            item,\n            \"monitor_component\",\n            getattr(item.module, \"pytest_monitor_component\", \"\"),\n        )\n    else:\n        setattr(item, 
\"monitor_skip_test\", True)\n\n\n@pytest.hookimpl\ndef pytest_pyfunc_call(pyfuncitem):\n    \"\"\"\n    Core sniffer logic. We encapsulate the test function in a sniffer function to collect\n    memory results.\n    \"\"\"\n\n    def wrapped_function():\n        try:\n            funcargs = pyfuncitem.funcargs\n            testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}\n            pyfuncitem.obj(**testargs)\n        except Exception:\n            raise\n        except BaseException as e:\n            return e\n\n    def prof():\n        m = memory_profiler.memory_usage((wrapped_function, ()), max_iterations=1, max_usage=True, retval=True)\n        if isinstance(m[1], BaseException):  # Do we have any outcome?\n            raise m[1]\n        memuse = m[0][0] if type(m[0]) is list else m[0]\n        setattr(pyfuncitem, \"mem_usage\", memuse)\n        setattr(pyfuncitem, \"monitor_results\", True)\n\n    if not PYTEST_MONITORING_ENABLED:\n        wrapped_function()\n    else:\n        if not pyfuncitem.session.config.option.mtr_disable_gc:\n            gc.collect()\n        prof()\n    return True\n\n\ndef pytest_make_parametrize_id(config, val, argname):\n    if config.option.mtr_want_explicit_ids:\n        return f\"{argname}={val}\"\n    return None\n\n\n@pytest.hookimpl(hookwrapper=True)\ndef pytest_sessionstart(session):\n    \"\"\"\n    Instantiate a monitor session to save collected metrics.\n    We yield at the end to let pytest pursue the execution.\n    \"\"\"\n    if session.config.option.mtr_force_component and session.config.option.mtr_component_prefix:\n        raise pytest.UsageError(\"Invalid usage: --force-component and --component-prefix are incompatible options!\")\n    if session.config.option.mtr_no_db and not session.config.option.mtr_remote and not session.config.option.mtr_none:\n        warnings.warn(\"pytest-monitor: No storage specified but monitoring is requested. 
Disabling monitoring.\")\n        session.config.option.mtr_none = True\n    component = session.config.option.mtr_force_component or session.config.option.mtr_component_prefix\n    if session.config.option.mtr_component_prefix:\n        component += \".{user_component}\"\n    if not component:\n        component = \"{user_component}\"\n    db = (\n        None\n        if (session.config.option.mtr_none or session.config.option.mtr_no_db)\n        else session.config.option.mtr_db_out\n    )\n    remote = None if session.config.option.mtr_none else session.config.option.mtr_remote\n    session.pytest_monitor = PyTestMonitorSession(\n        db=db, remote=remote, component=component, scope=session.config.option.mtr_scope\n    )\n    global PYTEST_MONITORING_ENABLED\n    PYTEST_MONITORING_ENABLED = not session.config.option.mtr_none\n    session.pytest_monitor.compute_info(session.config.option.mtr_description, session.config.option.mtr_tags)\n    yield\n\n\n@pytest.fixture(autouse=True, scope=\"module\")\ndef _prf_module_tracer(request):\n    if not PYTEST_MONITORING_ENABLED:\n        yield\n    else:\n        t_a = time.time()\n        ptimes_a = request.session.pytest_monitor.process.cpu_times()\n        yield\n        ptimes_b = request.session.pytest_monitor.process.cpu_times()\n        t_z = time.time()\n        rss = request.session.pytest_monitor.process.memory_info().rss / 1024**2\n        component = getattr(request.module, \"pytest_monitor_component\", \"\")\n        item = request.node.name[:-3]\n        pypath = request.module.__name__[: -len(item) - 1]\n        request.session.pytest_monitor.add_test_info(\n            item,\n            pypath,\n            \"\",\n            request.node._nodeid,\n            \"module\",\n            component,\n            t_a,\n            t_z - t_a,\n            ptimes_b.user - ptimes_a.user,\n            ptimes_b.system - ptimes_a.system,\n            rss,\n        )\n\n\n@pytest.fixture(autouse=True)\ndef _prf_tracer(request):\n    if not PYTEST_MONITORING_ENABLED:\n        yield\n    else:\n        ptimes_a = request.session.pytest_monitor.process.cpu_times()\n        yield\n        ptimes_b = request.session.pytest_monitor.process.cpu_times()\n        if not request.node.monitor_skip_test and getattr(request.node, \"monitor_results\", False):\n            item_name = request.node.originalname or request.node.name\n            item_loc = getattr(request.node, PYTEST_MONITOR_ITEM_LOC_MEMBER)[0]\n            request.session.pytest_monitor.add_test_info(\n                item_name,\n                request.module.__name__,\n                request.node.name,\n                item_loc,\n                \"function\",\n                request.node.monitor_component,\n                request.node.test_effective_start_time,\n                request.node.test_run_duration,\n                ptimes_b.user - ptimes_a.user,\n                ptimes_b.system - ptimes_a.system,\n                request.node.mem_usage,\n            )\n"
  },
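  {
    "path": "examples/markers_demo.py",
    "content": "# Editor-added illustrative sketch; this file is NOT part of pytest-monitor.\n# It shows the markers and the module-level attribute the plugin hooks handle,\n# as exercised by tests/test_monitor.py and tests/test_monitor_component.py.\n# The file path and the test bodies below are made up for illustration.\nimport sys\n\nimport pytest\n\n# Optional module attribute: picked up as the user component (used verbatim,\n# or appended to --component-prefix; see test_monitor_prefix_component).\npytest_monitor_component = \"demo\"\n\n\n@pytest.mark.monitor_skip_test\ndef test_runs_but_is_not_monitored():\n    # The test executes normally; pytest-monitor simply records no metrics.\n    assert 1 + 1 == 2\n\n\n@pytest.mark.monitor_skip_test_if(sys.platform == \"win32\")\ndef test_monitored_unless_condition_holds():\n    # Monitored everywhere except where the condition evaluates to True.\n    assert \"a\" * 3 == \"aaa\"\n"
  },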
  {
    "path": "pytest_monitor/session.py",
    "content": "import datetime\nimport hashlib\nimport json\nimport os\nimport warnings\nfrom http import HTTPStatus\n\nimport memory_profiler\nimport psutil\nimport requests\n\nfrom pytest_monitor.handler import DBHandler\nfrom pytest_monitor.sys_utils import (\n    ExecutionContext,\n    collect_ci_info,\n    determine_scm_revision,\n)\n\n\nclass PyTestMonitorSession:\n    def __init__(self, db=None, remote=None, component=\"\", scope=None, tracing=True):\n        self.__db = None\n        if db:\n            self.__db = DBHandler(db)\n        self.__monitor_enabled = tracing\n        self.__remote = remote\n        self.__component = component\n        self.__session = \"\"\n        self.__scope = scope or []\n        self.__eid = (None, None)\n        self.__mem_usage_base = None\n        self.__process = psutil.Process(os.getpid())\n\n    @property\n    def monitoring_enabled(self):\n        return self.__monitor_enabled\n\n    @property\n    def remote_env_id(self):\n        return self.__eid[1]\n\n    @property\n    def db_env_id(self):\n        return self.__eid[0]\n\n    @property\n    def process(self):\n        return self.__process\n\n    def get_env_id(self, env):\n        db, remote = None, None\n        if self.__db:\n            row = self.__db.query(\"SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?\", (env.compute_hash(),))\n            db = row[0] if row else None\n        if self.__remote:\n            r = requests.get(f\"{self.__remote}/contexts/{env.compute_hash()}\")\n            remote = None\n            if r.status_code == HTTPStatus.OK:\n                remote = json.loads(r.text)\n                if remote[\"contexts\"]:\n                    remote = remote[\"contexts\"][0][\"h\"]\n                else:\n                    remote = None\n        return db, remote\n\n    def compute_info(self, description, tags):\n        run_date = datetime.datetime.now().isoformat()\n        scm = determine_scm_revision()\n        h = hashlib.md5()\n        h.update(scm.encode())\n        h.update(run_date.encode())\n        h.update(description.encode())\n        self.__session = h.hexdigest()\n        # From description + tags to JSON format\n        d = collect_ci_info()\n        if description:\n            d[\"description\"] = description\n        for tag in tags:\n            if type(tag) is str:\n                _tag_info = tag.split(\"=\", 1)\n                d[_tag_info[0]] = _tag_info[1]\n            else:\n                for sub_tag in tag:\n                    _tag_info = sub_tag.split(\"=\", 1)\n                    d[_tag_info[0]] = _tag_info[1]\n        description = json.dumps(d)\n        # Now get memory usage base and create the database\n        self.prepare()\n        self.set_environment_info(ExecutionContext())\n        if self.__db:\n            self.__db.insert_session(self.__session, run_date, scm, description)\n        if self.__remote:\n            r = requests.post(\n                f\"{self.__remote}/sessions/\",\n                json={\n                    \"session_h\": self.__session,\n                    \"run_date\": run_date,\n                    \"scm_ref\": scm,\n                    \"description\": json.loads(description),\n                },\n            )\n            if r.status_code != HTTPStatus.CREATED:\n                self.__remote = \"\"\n                msg = f\"Cannot insert session in remote monitor server ({r.status_code})! 
Deactivating...')\"\n                warnings.warn(msg)\n\n    def set_environment_info(self, env):\n        self.__eid = self.get_env_id(env)\n        db_id, remote_id = self.__eid\n        if self.__db and db_id is None:\n            self.__db.insert_execution_context(env)\n            db_id = self.__db.query(\"select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?\", (env.compute_hash(),))[0]\n        if self.__remote and remote_id is None:\n            # We must postpone that to be run at the end of the pytest session.\n            r = requests.post(f\"{self.__remote}/contexts/\", json=env.to_dict())\n            if r.status_code != HTTPStatus.CREATED:\n                warnings.warn(f\"Cannot insert execution context in remote server (rc={r.status_code}! Deactivating...\")\n                self.__remote = \"\"\n            else:\n                remote_id = json.loads(r.text)[\"h\"]\n        self.__eid = db_id, remote_id\n\n    def prepare(self):\n        def dummy():\n            return True\n\n        memuse = memory_profiler.memory_usage((dummy,), max_iterations=1, max_usage=True)\n        self.__mem_usage_base = memuse[0] if type(memuse) is list else memuse\n\n    def add_test_info(\n        self,\n        item,\n        item_path,\n        item_variant,\n        item_loc,\n        kind,\n        component,\n        item_start_time,\n        total_time,\n        user_time,\n        kernel_time,\n        mem_usage,\n    ):\n        if kind not in self.__scope:\n            return\n        mem_usage = float(mem_usage) - self.__mem_usage_base\n        cpu_usage = (user_time + kernel_time) / total_time\n        item_start_time = datetime.datetime.fromtimestamp(item_start_time).isoformat()\n        final_component = self.__component.format(user_component=component)\n        if final_component.endswith(\".\"):\n            final_component = final_component[:-1]\n        item_variant = item_variant.replace(\"-\", \", \")  # No choice\n        if self.__db and self.db_env_id is not None:\n            self.__db.insert_metric(\n                self.__session,\n                self.db_env_id,\n                item_start_time,\n                item,\n                item_path,\n                item_variant,\n                item_loc,\n                kind,\n                final_component,\n                total_time,\n                user_time,\n                kernel_time,\n                cpu_usage,\n                mem_usage,\n            )\n        if self.__remote and self.remote_env_id is not None:\n            r = requests.post(\n                f\"{self.__remote}/metrics/\",\n                json={\n                    \"session_h\": self.__session,\n                    \"context_h\": self.remote_env_id,\n                    \"item_start_time\": item_start_time,\n                    \"item_path\": item_path,\n                    \"item\": item,\n                    \"item_variant\": item_variant,\n                    \"item_fs_loc\": item_loc,\n                    \"kind\": kind,\n                    \"component\": final_component,\n                    \"total_time\": total_time,\n                    \"user_time\": user_time,\n                    \"kernel_time\": kernel_time,\n                    \"cpu_usage\": cpu_usage,\n                    \"mem_usage\": mem_usage,\n                },\n            )\n            if r.status_code != HTTPStatus.CREATED:\n                self.__remote = \"\"\n                msg = f\"Cannot insert values in remote monitor server ({r.status_code})! 
Deactivating...')\"\n                warnings.warn(msg)\n"
  },
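  {
    "path": "examples/remote_payloads.py",
    "content": "# Editor-added illustrative sketch; this file is NOT part of pytest-monitor.\n# It mirrors the requests PyTestMonitorSession issues against a remote monitor\n# server in pytest_monitor/session.py. The base URL and the hash values below\n# are placeholders; the endpoints and JSON keys come from session.py.\nimport requests\n\nBASE_URL = \"http://localhost:8050\"  # placeholder, adapt to your server\n\n# Look up an execution context by its hash, as get_env_id() does.\nresp = requests.get(f\"{BASE_URL}/contexts/0123456789abcdef0123456789abcdef\")\nif resp.status_code == 200:\n    print(resp.json()[\"contexts\"])\n\n# Declare a session with the same keys compute_info() posts.\nrequests.post(\n    f\"{BASE_URL}/sessions/\",\n    json={\n        \"session_h\": \"cafebabe\",  # md5 over scm ref + run date + description\n        \"run_date\": \"2023-01-01T00:00:00\",\n        \"scm_ref\": \"deadbeef\",\n        \"description\": {},\n    },\n)\n"
  },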
  {
    "path": "pytest_monitor/sys_utils.py",
    "content": "import hashlib\nimport multiprocessing\nimport os\nimport platform\nimport socket\nimport subprocess\nimport sys\nimport warnings\n\nimport psutil\n\n\ndef collect_ci_info():\n    # Test for jenkins\n    if \"BUILD_NUMBER\" in os.environ and (\"BRANCH_NAME\" in os.environ or \"JOB_NAME\" in os.environ):\n        br = os.environ[\"BRANCH_NAME\"] if \"BRANCH_NAME\" in os.environ else os.environ[\"JOB_NAME\"]\n        return {\n            \"pipeline_branch\": br,\n            \"pipeline_build_no\": os.environ[\"BUILD_NUMBER\"],\n            \"__ci__\": \"jenkinsci\",\n        }\n    # Test for CircleCI\n    if \"CIRCLE_JOB\" in os.environ and \"CIRCLE_BUILD_NUM\" in os.environ:\n        return {\n            \"pipeline_branch\": os.environ[\"CIRCLE_JOB\"],\n            \"pipeline_build_no\": os.environ[\"CIRCLE_BUILD_NUM\"],\n            \"__ci__\": \"circleci\",\n        }\n    # Test for TravisCI\n    if \"TRAVIS_BUILD_NUMBER\" in os.environ and \"TRAVIS_BUILD_ID\" in os.environ:\n        return {\n            \"pipeline_branch\": os.environ[\"TRAVIS_BUILD_ID\"],\n            \"pipeline_build_no\": os.environ[\"TRAVIS_BUILD_NUMBER\"],\n            \"__ci__\": \"travisci\",\n        }\n    # Test for DroneCI\n    if \"DRONE_REPO_BRANCH\" in os.environ and \"DRONE_BUILD_NUMBER\" in os.environ:\n        return {\n            \"pipeline_branch\": os.environ[\"DRONE_REPO_BRANCH\"],\n            \"pipeline_build_no\": os.environ[\"DRONE_BUILD_NUMBER\"],\n            \"__ci__\": \"droneci\",\n        }\n    # Test for Gitlab CI\n    if \"CI_JOB_NAME\" in os.environ and \"CI_PIPELINE_ID\" in os.environ:\n        return {\n            \"pipeline_branch\": os.environ[\"CI_JOB_NAME\"],\n            \"pipeline_build_no\": os.environ[\"CI_PIPELINE_ID\"],\n            \"__ci__\": \"gitlabci\",\n        }\n    # Test for Bitbucket CI\n    if \"BITBUCKET_BRANCH\" in os.environ and \"BITBUCKET_BUILD_NUMBER\" in os.environ:\n        return {\n            \"pipeline_branch\": os.environ[\"BITBUCKET_BRANCH\"],\n            \"pipeline_build_no\": os.environ[\"BITBUCKET_BUILD_NUMBER\"],\n            \"__ci__\": \"bitbucketci\",\n        }\n    return {}\n\n\ndef determine_scm_revision():\n    for scm, cmd in ((\"git\", r\"git rev-parse HEAD\"), (\"p4\", r\"p4 changes -m1 \\#have\")):\n        p = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n        p_out, _ = p.communicate()\n        if p.returncode == 0:\n            scm_ref = p_out.decode(errors=\"ignore\").split(\"\\n\", maxsplit=1)[0]\n            if scm == \"p4\":\n                scm_ref = scm_ref.split()[1]\n            return scm_ref\n    return \"\"\n\n\ndef _get_cpu_string():\n    if platform.system().lower() == \"darwin\":\n        old_path = os.environ[\"PATH\"]\n        os.environ[\"PATH\"] = old_path + \":\" + \"/usr/sbin\"\n        ret = subprocess.check_output(\"sysctl -n machdep.cpu.brand_string\", shell=True)\n        os.environ[\"PATH\"] = old_path\n        return ret.decode().strip()\n    if platform.system().lower() == \"linux\":\n        with open(\"/proc/cpuinfo\", \"r\", encoding=\"utf-8\") as f:\n            lines = [i for i in f if i.startswith(\"model name\")]\n        if lines:\n            return lines[0].split(\":\")[1].strip()\n    return platform.processor()\n\n\nclass ExecutionContext:\n    def __init__(self):\n        self.__cpu_count = multiprocessing.cpu_count()\n        self.__cpu_vendor = _get_cpu_string()\n        if int(os.environ.get(\"PYTEST_MONITOR_FORCE_CPU_FREQ\", 
\"0\")):\n            self._read_cpu_freq_from_env()\n        else:\n            try:\n                self.__cpu_freq_base = psutil.cpu_freq().current\n            except (AttributeError, NotImplementedError, FileNotFoundError):\n                warnings.warn(\"Unable to fetch CPU frequency. Trying to read it from environment..\")\n                self._read_cpu_freq_from_env()\n        self.__proc_typ = platform.processor()\n        self.__tot_mem = int(psutil.virtual_memory().total / 1024**2)\n        self.__fqdn = socket.getfqdn()\n        self.__machine = platform.machine()\n        self.__arch = platform.architecture()[0]\n        self.__system = f\"{platform.system()} - {platform.release()}\"\n        self.__py_ver = sys.version\n\n    def _read_cpu_freq_from_env(self):\n        try:\n            self.__cpu_freq_base = float(os.environ.get(\"PYTEST_MONITOR_CPU_FREQ\", \"0.\"))\n        except (ValueError, TypeError):\n            warnings.warn(\"Wrong type/value while reading cpu frequency from environment. Forcing to 0.0.\")\n            self.__cpu_freq_base = 0.0\n\n    def to_dict(self):\n        return {\n            \"cpu_count\": self.cpu_count,\n            \"cpu_frequency\": self.cpu_frequency,\n            \"cpu_type\": self.cpu_type,\n            \"cpu_vendor\": self.cpu_vendor,\n            \"ram_total\": self.ram_total,\n            \"machine_node\": self.fqdn,\n            \"machine_type\": self.machine,\n            \"machine_arch\": self.architecture,\n            \"system_info\": self.system_info,\n            \"python_info\": self.python_info,\n            \"h\": self.compute_hash(),\n        }\n\n    @property\n    def cpu_count(self):\n        return self.__cpu_count\n\n    @property\n    def cpu_frequency(self):\n        return self.__cpu_freq_base\n\n    @property\n    def cpu_type(self):\n        return self.__proc_typ\n\n    @property\n    def cpu_vendor(self):\n        return self.__cpu_vendor\n\n    @property\n    def ram_total(self):\n        return self.__tot_mem\n\n    @property\n    def fqdn(self):\n        return self.__fqdn\n\n    @property\n    def machine(self):\n        return self.__machine\n\n    @property\n    def architecture(self):\n        return self.__arch\n\n    @property\n    def system_info(self):\n        return self.__system\n\n    @property\n    def python_info(self):\n        return self.__py_ver\n\n    def compute_hash(self):\n        hr = hashlib.md5()\n        hr.update(str(self.__cpu_count).encode())\n        hr.update(str(self.__cpu_freq_base).encode())\n        hr.update(str(self.__proc_typ).encode())\n        hr.update(str(self.__tot_mem).encode())\n        hr.update(str(self.__fqdn).encode())\n        hr.update(str(self.__machine).encode())\n        hr.update(str(self.__arch).encode())\n        hr.update(str(self.__system).encode())\n        hr.update(str(self.__py_ver).encode())\n        return hr.hexdigest()\n"
  },
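  {
    "path": "examples/cpu_freq_env.py",
    "content": "# Editor-added illustrative sketch; this file is NOT part of pytest-monitor.\n# It demonstrates the two environment variables honoured by ExecutionContext\n# in pytest_monitor/sys_utils.py, as covered by tests/test_monitor_context.py.\nimport os\n\nfrom pytest_monitor.sys_utils import ExecutionContext\n\n# With force mode on, psutil.cpu_freq() is never called and the value of\n# PYTEST_MONITOR_CPU_FREQ is trusted instead (0.0 when unset or unparsable).\nos.environ[\"PYTEST_MONITOR_FORCE_CPU_FREQ\"] = \"1\"\nos.environ[\"PYTEST_MONITOR_CPU_FREQ\"] = \"3000\"\n\nctx = ExecutionContext()\nprint(ctx.cpu_frequency)   # 3000.0\nprint(ctx.compute_hash())  # md5 fingerprint of the collected context\n"
  },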
  {
    "path": "requirements.dev.txt",
    "content": "psutil>=5.1.0\nmemory_profiler>=0.58\npytest\nrequests\nblack\nisort\nflake8=6.1.0\nflake8-builtins=2.1.0\nflake8-simplify=0.19.3\nflake8-comprehensions=3.10.1\nflake8-pytest-style=1.6.0\nflake8-return=1.2.0\nflake8-pyproject=1.2.3\npre-commit=3.3.3"
  },
  {
    "path": "requirements.txt",
    "content": "psutil>=5.1.0\nmemory_profiler>=0.58\npytest\nrequests\n"
  },
  {
    "path": "tests/conftest.py",
    "content": "pytest_plugins = [\"pytester\"]\n"
  },
  {
    "path": "tests/test_monitor.py",
    "content": "# -*- coding: utf-8 -*-\nimport json\nimport pathlib\nimport sqlite3\n\nimport pytest\n\n\ndef test_monitor_basic_test(testdir):\n    \"\"\"Make sure that pytest-monitor does the job without impacting user tests.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n\n    def test_ok():\n        time.sleep(0.5)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"-vv\", \"--tag\", \"version=12.3.5\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that that we get a '0' exit code for the test suite\n    result.assert_outcomes(passed=1)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert len(cursor.fetchall()) == 1\n    cursor = db.cursor()\n    tags = json.loads(cursor.execute(\"SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;\").fetchone()[0])\n    assert \"description\" not in tags\n    assert \"version\" in tags\n    assert tags[\"version\"] == \"12.3.5\"\n\n\ndef test_monitor_basic_test_description(testdir):\n    \"\"\"Make sure that pytest-monitor does the job without impacting user tests.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n\n    def test_ok():\n        time.sleep(0.5)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"-vv\", \"--description\", '\"Test\"', \"--tag\", \"version=12.3.5\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that that we get a '0' exit code for the test suite\n    result.assert_outcomes(passed=1)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert len(cursor.fetchall()) == 1\n    cursor = db.cursor()\n    tags = json.loads(cursor.execute(\"SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;\").fetchone()[0])\n    assert \"description\" in tags\n    assert tags[\"description\"] == '\"Test\"'\n    assert \"version\" in tags\n    assert tags[\"version\"] == \"12.3.5\"\n\n\ndef test_monitor_pytest_skip_marker(testdir):\n    \"\"\"Make sure that pytest-monitor does the job without impacting user tests.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import pytest\n    import time\n\n    @pytest.mark.skip(\"Some reason\")\n    def test_skipped():\n        assert True\n\n\"\"\"\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"-v\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_skipped SKIPPED*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that that we get a '0' exit code for the testsuite\n    result.assert_outcomes(skipped=1)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert not 
len(cursor.fetchall())\n\n\ndef test_monitor_pytest_skip_marker_on_fixture(testdir):\n    \"\"\"Make sure that pytest-monitor does not record metrics when a fixture skips the test.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import pytest\n    import time\n\n    @pytest.fixture\n    def a_fixture():\n        pytest.skip(\"because this is the scenario being tested\")\n\n    def test_skipped(a_fixture):\n        assert True\n\n\"\"\"\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"-v\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_skipped SKIPPED*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that we get a '0' exit code for the test suite\n    result.assert_outcomes(skipped=1)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert not len(cursor.fetchall())\n\n\ndef test_bad_markers(testdir):\n    \"\"\"Make sure that pytest-monitor warns about unknown markers.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n        import pytest\n        import time\n\n\n        @pytest.mark.monitor_bad_marker\n        def test_ok():\n            time.sleep(0.1)\n            x = ['a' * i for i in range(100)]\n            assert len(x) == 100\n\n    \"\"\"\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"-v\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_ok PASSED*\", \"*Nothing known about marker monitor_bad_marker*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that we get a '0' exit code for the test suite\n    result.assert_outcomes(passed=1)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert len(cursor.fetchall()) == 1\n\n\ndef test_monitor_skip_module(testdir):\n    \"\"\"Make sure that pytest-monitor correctly understands the monitor_skip_test marker.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\nimport pytest\nimport time\n\npytestmark = pytest.mark.monitor_skip_test\n\ndef test_ok_not_monitored():\n    time.sleep(0.1)\n    x = ['a' * i for i in range(100)]\n    assert len(x) == 100\n\ndef test_another_function_ok_not_monitored():\n    assert True\n\"\"\"\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"-v\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines(\n        [\n            \"*::test_ok_not_monitored PASSED*\",\n            \"*::test_another_function_ok_not_monitored PASSED*\",\n        ]\n    )\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that we get a '0' exit code for the test suite\n    result.assert_outcomes(passed=2)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert not len(cursor.fetchall())  # Nothing was monitored\n\n\ndef test_monitor_skip_test(testdir):\n    \"\"\"Make sure that pytest-monitor correctly understands the monitor_skip_test marker.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    
import pytest\n    import time\n\n\n    @pytest.mark.monitor_skip_test\n    def test_not_monitored():\n        time.sleep(0.1)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"-v\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_not_monitored PASSED*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that we get a '0' exit code for the test suite\n    result.assert_outcomes(passed=1)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert not len(cursor.fetchall())  # nothing monitored\n\n\ndef test_monitor_skip_test_if(testdir):\n    \"\"\"Make sure that pytest-monitor correctly understands the monitor_skip_test_if marker.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import pytest\n    import time\n\n\n    @pytest.mark.monitor_skip_test_if(True)\n    def test_not_monitored():\n        time.sleep(0.1)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\n    @pytest.mark.monitor_skip_test_if(False)\n    def test_monitored():\n        time.sleep(0.1)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"-v\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_not_monitored PASSED*\", \"*::test_monitored PASSED*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that we get a '0' exit code for the test suite\n    result.assert_outcomes(passed=2)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert len(cursor.fetchall()) == 1\n\n\ndef test_monitor_no_db(testdir):\n    \"\"\"Make sure that pytest-monitor disables monitoring when --no-db is given without a remote server.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import pytest\n    import time\n\n\n    def test_it():\n        time.sleep(0.1)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\n    def test_that():\n        time.sleep(0.1)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    wrn = \"pytest-monitor: No storage specified but monitoring is requested. 
Disabling monitoring.\"\n    with pytest.warns(UserWarning, match=wrn):\n        # run pytest with the following cmd args\n        result = testdir.runpytest(\"--no-db\", \"-v\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_it PASSED*\", \"*::test_that PASSED*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert not pymon_path.exists()\n\n    # make sure that that we get a '0' exit code for the testsuite\n    result.assert_outcomes(passed=2)\n\n\ndef test_monitor_basic_output(testdir):\n    \"\"\"Make sure that pytest-monitor does not repeat captured output (issue #26).\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n        def test_it():\n            print('Hello World')\n    \"\"\"\n    )\n\n    wrn = \"pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring.\"\n    with pytest.warns(UserWarning, match=wrn):\n        # run pytest with the following cmd args\n        result = testdir.runpytest(\"--no-db\", \"-s\", \"-vv\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_it Hello World*\"])\n    assert \"Hello World\" != result.stdout.get_lines_after(\"*Hello World\")[0]\n\n    # make sure that that we get a '0' exit code for the testsuite\n    result.assert_outcomes(passed=1)\n\n\ndef test_monitor_with_doctest(testdir):\n    \"\"\"Make sure that pytest-monitor does not fail to run doctest.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        '''\n        def run(a, b):\n            \"\"\"\n            >>> run(3, 30)\n            33\n            \"\"\"\n            return a + b\n    '''\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"--doctest-modules\", \"-vv\")\n\n    # make sure that that we get a '0' exit code for the testsuite\n    result.assert_outcomes(passed=1)\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert not len(cursor.fetchall())\n\n    pymon_path.unlink()\n    result = testdir.runpytest(\"--doctest-modules\", \"--no-monitor\", \"-vv\")\n\n    # make sure that that we get a '0' exit code for the testsuite\n    result.assert_outcomes(passed=1)\n    assert not pymon_path.exists()\n"
  },
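  {
    "path": "examples/read_pymon.py",
    "content": "# Editor-added illustrative sketch; this file is NOT part of pytest-monitor.\n# It reads the .pymon SQLite database the plugin writes, restricting itself\n# to tables and columns the test suite itself queries (TEST_METRICS with\n# ITEM/COMPONENT, TEST_SESSIONS with RUN_DESCRIPTION).\nimport json\nimport sqlite3\n\ndb = sqlite3.connect(\".pymon\")\ncursor = db.cursor()\n\n# One row per monitored test item.\nfor item, component in cursor.execute(\"SELECT ITEM, COMPONENT FROM TEST_METRICS;\"):\n    print(f\"{component or '<no component>'}: {item}\")\n\n# Session description: --tag/--description values plus CI info, as JSON.\nfor (description,) in cursor.execute(\"SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;\"):\n    print(json.loads(description))\n\ndb.close()\n"
  },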
  {
    "path": "tests/test_monitor_component.py",
    "content": "# -*- coding: utf-8 -*-\nimport pathlib\nimport sqlite3\n\n\ndef test_monitor_no_component(testdir):\n    \"\"\"Make sure that pytest-monitor has an empty component by default\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n\n    def test_ok():\n        time.sleep(0.5)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"-v\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that that we get a '0' exit code for the testsuite\n    result.assert_outcomes(passed=1)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert len(cursor.fetchall()) == 1\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS WHERE COMPONENT != '' AND ITEM LIKE '%test_ok';\")\n    assert not len(cursor.fetchall())\n\n\ndef test_monitor_force_component(testdir):\n    \"\"\"Make sure that pytest-monitor forces the component name if required\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n\n    def test_force_ok():\n        time.sleep(0.5)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"--force-component\", \"my_component\", \"-v\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_force_ok PASSED*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that that we get a '0' exit code for the testsuite\n    result.assert_outcomes(passed=1)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert len(cursor.fetchall()) == 1\n    cursor.execute(\n        \"SELECT ITEM FROM TEST_METRICS\" \" WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_force_ok%';\"\n    )\n    assert len(cursor.fetchall()) == 1\n\n\ndef test_monitor_prefix_component(testdir):\n    \"\"\"Make sure that pytest-monitor has a prefixed component\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n    pytest_monitor_component = 'internal'\n\n    def test_prefix_ok():\n        time.sleep(0.5)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"--component-prefix\", \"my_component\", \"-v\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_prefix_ok PASSED*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that that we get a '0' exit code for the testsuite\n    result.assert_outcomes(passed=1)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert len(cursor.fetchall()) == 1\n    cursor.execute(\n        \"SELECT ITEM FROM TEST_METRICS\" \" WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';\"\n    )\n    assert not len(cursor.fetchall())\n    
cursor.execute(\n        \"SELECT ITEM FROM TEST_METRICS\" \" WHERE COMPONENT == 'my_component.internal' AND ITEM LIKE '%test_prefix_ok%';\"\n    )\n    assert len(cursor.fetchall()) == 1\n\n\ndef test_monitor_prefix_without_component(testdir):\n    \"\"\"Make sure that pytest-monitor uses the prefix alone when the module declares no component\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n\n    def test_prefix_ok():\n        time.sleep(0.5)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"--component-prefix\", \"my_component\", \"-v\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_prefix_ok PASSED*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that we get a '0' exit code for the test suite\n    result.assert_outcomes(passed=1)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    assert len(cursor.fetchall()) == 1\n    cursor.execute(\n        \"SELECT ITEM FROM TEST_METRICS\" \" WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';\"\n    )\n    assert len(cursor.fetchall()) == 1\n"
  },
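  {
    "path": "examples/component_flags.py",
    "content": "# Editor-added illustrative sketch; this file is NOT part of pytest-monitor.\n# --force-component and --component-prefix are mutually exclusive (the plugin\n# raises a UsageError when both are given), so pick one per run. The flags\n# are the ones exercised by tests/test_monitor_component.py.\nimport pytest\n\n# Stamp every metric with a fixed component name...\npytest.main([\"--force-component\", \"my_component\", \"tests/\"])\n\n# ...or prefix whatever pytest_monitor_component each test module declares\n# (yielding e.g. 'my_component.internal').\npytest.main([\"--component-prefix\", \"my_component\", \"tests/\"])\n"
  },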
  {
    "path": "tests/test_monitor_context.py",
    "content": "import os\nimport pathlib\nimport sqlite3\n\nimport mock\nimport pytest\n\nCPU_FREQ_PATH = \"pytest_monitor.sys_utils.psutil.cpu_freq\"\n\nTEST_CONTENT = \"\"\"\nimport time\n\n\ndef test_ok():\n    time.sleep(0.5)\n    x = ['a' * i for i in range(100)]\n    assert len(x) == 100\n\"\"\"\n\n\ndef get_nb_metrics_with_cpu_freq(path):\n    db_path = path / \".pymon\"\n    db = sqlite3.connect(db_path.as_posix())\n    cursor = db.cursor()\n    cursor.execute(\"SELECT ITEM FROM TEST_METRICS;\")\n    nb_metrics = len(cursor.fetchall())\n    cursor = db.cursor()\n    cursor.execute(\"SELECT CPU_FREQUENCY_MHZ FROM EXECUTION_CONTEXTS;\")\n    rows = cursor.fetchall()\n    assert len(rows) == 1\n    cpu_freq = rows[0][0]\n    return nb_metrics, cpu_freq\n\n\ndef test_force_cpu_freq_set_0_use_psutil(testdir):\n    \"\"\"Test that when force mode is set, we do not call psutil to fetch CPU's frequency\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(TEST_CONTENT)\n\n    with mock.patch(CPU_FREQ_PATH, return_value=1500) as cpu_freq_mock:\n        os.environ[\"PYTEST_MONITOR_FORCE_CPU_FREQ\"] = \"0\"\n        os.environ[\"PYTEST_MONITOR_CPU_FREQ\"] = \"3000\"\n        # run pytest with the following cmd args\n        result = testdir.runpytest(\"-vv\")\n        del os.environ[\"PYTEST_MONITOR_FORCE_CPU_FREQ\"]\n        del os.environ[\"PYTEST_MONITOR_CPU_FREQ\"]\n        cpu_freq_mock.assert_called()\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n    # make sure that we get a '0' exit code for the test suite\n    result.assert_outcomes(passed=1)\n\n    nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir)))\n\n    assert (nb_metrics, cpu_freq) == (1, 3000)\n\n\ndef test_force_cpu_freq(testdir):\n    \"\"\"Test that when force mode is set, we do not call psutil to fetch CPU's frequency\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(TEST_CONTENT)\n\n    with mock.patch(CPU_FREQ_PATH, return_value=1500) as cpu_freq_mock:\n        os.environ[\"PYTEST_MONITOR_FORCE_CPU_FREQ\"] = \"1\"\n        os.environ[\"PYTEST_MONITOR_CPU_FREQ\"] = \"3000\"\n        # run pytest with the following cmd args\n        result = testdir.runpytest(\"-vv\")\n        del os.environ[\"PYTEST_MONITOR_FORCE_CPU_FREQ\"]\n        del os.environ[\"PYTEST_MONITOR_CPU_FREQ\"]\n        cpu_freq_mock.assert_not_called()\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n    # make sure that we get a '0' exit code for the test suite\n    result.assert_outcomes(passed=1)\n\n    nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir)))\n\n    assert (nb_metrics, cpu_freq) == (1, 3000)\n\n\n@pytest.mark.parametrize(\"effect\", [AttributeError, NotImplementedError, FileNotFoundError])\ndef test_when_cpu_freq_cannot_fetch_frequency_set_freq_by_using_fallback(effect, testdir):\n    \"\"\"Make sure that pytest-monitor fallback takes value of CPU FREQ from special env var\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(TEST_CONTENT)\n\n    with mock.patch(CPU_FREQ_PATH, side_effect=effect) as cpu_freq_mock:\n        os.environ[\"PYTEST_MONITOR_CPU_FREQ\"] = \"3000\"\n        # run pytest with the following cmd args\n        result = testdir.runpytest(\"-vv\")\n        del os.environ[\"PYTEST_MONITOR_CPU_FREQ\"]\n        cpu_freq_mock.assert_called()\n\n    # fnmatch_lines does an 
assertion internally\n    result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n    # make sure that we get a '0' exit code for the test suite\n    result.assert_outcomes(passed=1)\n\n    nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir)))\n\n    assert (nb_metrics, cpu_freq) == (1, 3000)\n\n\n@pytest.mark.parametrize(\"effect\", [AttributeError, NotImplementedError, FileNotFoundError])\ndef test_when_cpu_freq_cannot_fetch_frequency_set_freq_to_0(effect, testdir):\n    \"\"\"Make sure that pytest-monitor's fallback sets the CPU frequency to 0 when nothing else is available.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(TEST_CONTENT)\n\n    with mock.patch(CPU_FREQ_PATH, side_effect=effect) as cpu_freq_mock:\n        # run pytest with the following cmd args\n        result = testdir.runpytest(\"-vv\")\n        cpu_freq_mock.assert_called()\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n    # make sure that we get a '0' exit code for the test suite\n    result.assert_outcomes(passed=1)\n\n    nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir)))\n\n    assert (nb_metrics, cpu_freq) == (1, 0)\n\n\n@mock.patch(\"pytest_monitor.sys_utils.psutil.cpu_freq\", return_value=None)\ndef test_when_cpu_freq_cannot_fetch_frequency(cpu_freq_mock, testdir):\n    \"\"\"Make sure that pytest-monitor does the job when we have issues collecting context resources\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(TEST_CONTENT)\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"-vv\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n    # make sure that we get a '0' exit code for the test suite\n    result.assert_outcomes(passed=1)\n\n    nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir)))\n\n    assert (nb_metrics, cpu_freq) == (1, 0)\n"
  },
  {
    "path": "tests/test_monitor_in_ci.py",
    "content": "# -*- coding: utf-8 -*-\nimport os\nimport pathlib\nimport sqlite3\n\n\ndef test_monitor_no_ci(testdir):\n    \"\"\"Make sure that pytest-monitor does not insert CI information.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n\n    def test_ok():\n        time.sleep(0.5)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    envs = {}\n    for k in [\n        \"CIRCLE_BUILD_NUM\",\n        \"CIRCLE_JOB\",\n        \"DRONE_REPO_BRANCH\",\n        \"DRONE_BUILD_NUMBER\",\n        \"BUILD_NUMBER\",\n        \"JOB_NUMBER\",\n        \"JOB_NAME\",\n        \"TRAVIS_BUILD_ID\",\n        \"TRAVIS_BUILD_NUMBER\",\n        \"CI_PIPELINE_ID\",\n        \"CI_JOB_NAME\",\n        \"BITBUCKET_BRANCH\",\n        \"BITBUCKET_BUILD_NUMBER\",\n    ]:\n        if k in os.environ:\n            envs[k] = os.environ[k]\n            del os.environ[k]\n\n    # run pytest with the following cmd args\n    result = testdir.runpytest(\"-v\")\n\n    # fnmatch_lines does an assertion internally\n    result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n\n    pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n    assert pymon_path.exists()\n\n    # make sure that that we get a '0' exit code for the testsuite\n    result.assert_outcomes(passed=1)\n\n    db = sqlite3.connect(str(pymon_path))\n    cursor = db.cursor()\n    cursor.execute(\"SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;\")\n    desc = cursor.fetchall()\n    assert len(desc) == 1  # current test\n    assert desc[0][0] == \"{}\"\n    for k in envs.keys():\n        os.environ[k] = envs[k]\n\n\ndef test_monitor_jenkins_ci(testdir):\n    \"\"\"Make sure that pytest-monitor correctly handle Jenkins CI information.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n\n    def test_ok():\n        time.sleep(0.5)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    def check_that(the_result, match):\n        # fnmatch_lines does an assertion internally\n        the_result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n\n        pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n        assert pymon_path.exists()\n\n        # make sure that that we get a '0' exit code for the testsuite\n        the_result.assert_outcomes(passed=1)\n\n        db = sqlite3.connect(str(pymon_path))\n        cursor = db.cursor()\n        cursor.execute(\"SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;\")\n        desc = cursor.fetchall()\n        assert len(desc) == 1  # current test\n        assert desc[0][0] == match\n        pymon_path.unlink()\n\n    run_description = '{\"pipeline_branch\": \"test\", \"pipeline_build_no\": \"123\", \"__ci__\": \"jenkinsci\"}'\n\n    envs = {}\n    for k in [\n        \"CIRCLE_BUILD_NUM\",\n        \"CIRCLE_JOB\",\n        \"DRONE_REPO_BRANCH\",\n        \"DRONE_BUILD_NUMBER\",\n        \"BUILD_NUMBER\",\n        \"JOB_NUMBER\",\n        \"JOB_NAME\",\n        \"TRAVIS_BUILD_ID\",\n        \"TRAVIS_BUILD_NUMBER\",\n        \"CI_PIPELINE_ID\",\n        \"CI_JOB_NAME\",\n        \"BITBUCKET_BRANCH\",\n        \"BITBUCKET_BUILD_NUMBER\",\n    ]:\n        if k in os.environ:\n            envs[k] = os.environ[k]\n            del os.environ[k]\n\n    for env, exp in [\n        ({\"BUILD_NUMBER\": \"123\"}, \"{}\"),\n        ({\"BUILD_NUMBER\": \"123\", \"JOB_NAME\": \"test\"}, run_description),\n        ({\"BUILD_NUMBER\": \"123\", 
\"BRANCH_NAME\": \"test\"}, run_description),\n        (\n            {\"BUILD_NUMBER\": \"123\", \"JOB_NAME\": \"test-123\", \"BRANCH_NAME\": \"test\"},\n            run_description,\n        ),\n    ]:\n        if \"BUILD_NUMBER\" in os.environ:\n            del os.environ[\"BUILD_NUMBER\"]\n        if \"JOB_NUMBER\" in os.environ:\n            del os.environ[\"JOB_NAME\"]\n        if \"BRANCH_NUMBER\" in os.environ:\n            del os.environ[\"BRANCH_NAME\"]\n\n        for k, v in env.items():\n            os.environ[k] = v\n\n        result = testdir.runpytest(\"-v\")\n        check_that(result, match=exp)\n\n    if \"BUILD_NUMBER\" in os.environ:\n        del os.environ[\"BUILD_NUMBER\"]\n    if \"JOB_NUMBER\" in os.environ:\n        del os.environ[\"JOB_NAME\"]\n    if \"BRANCH_NUMBER\" in os.environ:\n        del os.environ[\"BRANCH_NAME\"]\n\n\ndef test_monitor_gitlab_ci(testdir):\n    \"\"\"Make sure that pytest-monitor correctly handle Gitlab CI information.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n\n    def test_ok():\n        time.sleep(0.5)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    def check_that(the_result, match):\n        # fnmatch_lines does an assertion internally\n        the_result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n\n        pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n        assert pymon_path.exists()\n\n        # make sure that that we get a '0' exit code for the testsuite\n        the_result.assert_outcomes(passed=1)\n\n        db = sqlite3.connect(str(pymon_path))\n        cursor = db.cursor()\n        cursor.execute(\"SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;\")\n        desc = cursor.fetchall()\n        assert len(desc) == 1  # current test\n        assert desc[0][0] == match\n        pymon_path.unlink()\n\n    run_description = '{\"pipeline_branch\": \"test\", \"pipeline_build_no\": \"123\", \"__ci__\": \"gitlabci\"}'\n    envs = {}\n    for k in [\n        \"CIRCLE_BUILD_NUM\",\n        \"CIRCLE_JOB\",\n        \"DRONE_REPO_BRANCH\",\n        \"DRONE_BUILD_NUMBER\",\n        \"BUILD_NUMBER\",\n        \"JOB_NUMBER\",\n        \"JOB_NAME\",\n        \"TRAVIS_BUILD_ID\",\n        \"TRAVIS_BUILD_NUMBER\",\n        \"CI_PIPELINE_ID\",\n        \"CI_JOB_NAME\",\n        \"BITBUCKET_BRANCH\",\n        \"BITBUCKET_BUILD_NUMBER\",\n    ]:\n        if k in os.environ:\n            envs[k] = os.environ[k]\n            del os.environ[k]\n\n    for env, exp in [\n        ({\"CI_PIPELINE_ID\": \"123\"}, \"{}\"),\n        ({\"CI_PIPELINE_ID\": \"123\", \"CI_JOB_NAME\": \"test\"}, run_description),\n        ({\"CI_JOB_NAME\": \"123\"}, \"{}\"),\n    ]:\n        if \"CI_PIPELINE_ID\" in os.environ:\n            del os.environ[\"CI_PIPELINE_ID\"]\n        if \"CI_JOB_NAME\" in os.environ:\n            del os.environ[\"CI_JOB_NAME\"]\n\n        for k, v in env.items():\n            os.environ[k] = v\n\n        result = testdir.runpytest(\"-v\")\n        check_that(result, match=exp)\n\n    if \"CI_PIPELINE_ID\" in os.environ:\n        del os.environ[\"CI_PIPELINE_ID\"]\n    if \"CI_JOB_NAME\" in os.environ:\n        del os.environ[\"CI_JOB_NAME\"]\n\n\ndef test_monitor_travis_ci(testdir):\n    \"\"\"Make sure that pytest-monitor correctly handle Travis CI information.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n\n    def test_ok():\n        time.sleep(0.5)\n      
  x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    def check_that(the_result, match):\n        # fnmatch_lines does an assertion internally\n        the_result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n\n        pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n        assert pymon_path.exists()\n\n        # make sure that we get a '0' exit code for the test suite\n        the_result.assert_outcomes(passed=1)\n\n        db = sqlite3.connect(str(pymon_path))\n        cursor = db.cursor()\n        cursor.execute(\"SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;\")\n        desc = cursor.fetchall()\n        assert len(desc) == 1  # current test\n        assert desc[0][0] == match\n        pymon_path.unlink()\n\n    run_description = '{\"pipeline_branch\": \"test\", \"pipeline_build_no\": \"123\", \"__ci__\": \"travisci\"}'\n    envs = {}\n    for k in [\n        \"CIRCLE_BUILD_NUM\",\n        \"CIRCLE_JOB\",\n        \"DRONE_REPO_BRANCH\",\n        \"DRONE_BUILD_NUMBER\",\n        \"BUILD_NUMBER\",\n        \"JOB_NUMBER\",\n        \"JOB_NAME\",\n        \"TRAVIS_BUILD_ID\",\n        \"TRAVIS_BUILD_NUMBER\",\n        \"CI_PIPELINE_ID\",\n        \"CI_JOB_NAME\",\n        \"BITBUCKET_BRANCH\",\n        \"BITBUCKET_BUILD_NUMBER\",\n    ]:\n        if k in os.environ:\n            envs[k] = os.environ[k]\n            del os.environ[k]\n\n    for env, exp in [\n        ({\"TRAVIS_BUILD_NUMBER\": \"123\"}, \"{}\"),\n        ({\"TRAVIS_BUILD_NUMBER\": \"123\", \"TRAVIS_BUILD_ID\": \"test\"}, run_description),\n        ({\"TRAVIS_BUILD_ID\": \"test-123\"}, \"{}\"),\n    ]:\n        if \"TRAVIS_BUILD_NUMBER\" in os.environ:\n            del os.environ[\"TRAVIS_BUILD_NUMBER\"]\n        if \"TRAVIS_BUILD_ID\" in os.environ:\n            del os.environ[\"TRAVIS_BUILD_ID\"]\n\n        for k, v in env.items():\n            os.environ[k] = v\n\n        result = testdir.runpytest(\"-v\")\n        check_that(result, match=exp)\n\n    if \"TRAVIS_BUILD_NUMBER\" in os.environ:\n        del os.environ[\"TRAVIS_BUILD_NUMBER\"]\n    if \"TRAVIS_BUILD_ID\" in os.environ:\n        del os.environ[\"TRAVIS_BUILD_ID\"]\n\n\ndef test_monitor_circle_ci(testdir):\n    \"\"\"Make sure that pytest-monitor correctly handles Circle CI information.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n\n    def test_ok():\n        time.sleep(0.5)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    def check_that(the_result, match):\n        # fnmatch_lines does an assertion internally\n        the_result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n\n        pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n        assert pymon_path.exists()\n\n        # make sure that we get a '0' exit code for the test suite\n        the_result.assert_outcomes(passed=1)\n\n        db = sqlite3.connect(str(pymon_path))\n        cursor = db.cursor()\n        cursor.execute(\"SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;\")\n        desc = cursor.fetchall()\n        assert len(desc) == 1  # current test\n        assert desc[0][0] == match\n        pymon_path.unlink()\n\n    run_description = '{\"pipeline_branch\": \"test\", \"pipeline_build_no\": \"123\", \"__ci__\": \"circleci\"}'\n    envs = {}\n    for k in [\n        \"CIRCLE_BUILD_NUM\",\n        \"CIRCLE_JOB\",\n        \"DRONE_REPO_BRANCH\",\n        \"DRONE_BUILD_NUMBER\",\n        \"BUILD_NUMBER\",\n        \"JOB_NUMBER\",\n      
  \"JOB_NAME\",\n        \"TRAVIS_BUILD_ID\",\n        \"TRAVIS_BUILD_NUMBER\",\n        \"CI_PIPELINE_ID\",\n        \"CI_JOB_NAME\",\n        \"BITBUCKET_BRANCH\",\n        \"BITBUCKET_BUILD_NUMBER\",\n    ]:\n        if k in os.environ:\n            envs[k] = os.environ[k]\n            del os.environ[k]\n\n    for env, exp in [\n        ({\"CIRCLE_BUILD_NUM\": \"123\"}, \"{}\"),\n        ({\"CIRCLE_BUILD_NUM\": \"123\", \"CIRCLE_JOB\": \"test\"}, run_description),\n        ({\"CIRCLE_JOB\": \"test\"}, \"{}\"),\n    ]:\n        if \"CIRCLE_BUILD_NUM\" in os.environ:\n            del os.environ[\"CIRCLE_BUILD_NUM\"]\n        if \"CIRCLE_JOB\" in os.environ:\n            del os.environ[\"CIRCLE_JOB\"]\n\n        for k, v in env.items():\n            os.environ[k] = v\n\n        result = testdir.runpytest(\"-v\")\n        check_that(result, match=exp)\n\n    if \"CIRCLE_BUILD_NUM\" in os.environ:\n        del os.environ[\"CIRCLE_BUILD_NUM\"]\n    if \"CIRCLE_JOB\" in os.environ:\n        del os.environ[\"CIRCLE_JOB\"]\n\n\ndef test_monitor_drone_ci(testdir):\n    \"\"\"Make sure that pytest-monitor correctly handle Jenkins CI information.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n\n    def test_ok():\n        time.sleep(0.5)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    def check_that(the_result, match):\n        # fnmatch_lines does an assertion internally\n        the_result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n\n        pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n        assert pymon_path.exists()\n\n        # make sure that that we get a '0' exit code for the testsuite\n        the_result.assert_outcomes(passed=1)\n\n        db = sqlite3.connect(str(pymon_path))\n        cursor = db.cursor()\n        cursor.execute(\"SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;\")\n        desc = cursor.fetchall()\n        assert len(desc) == 1  # current test\n        assert desc[0][0] == match\n        pymon_path.unlink()\n\n    run_description = '{\"pipeline_branch\": \"test\", \"pipeline_build_no\": \"123\", \"__ci__\": \"droneci\"}'\n    envs = {}\n    for k in [\n        \"CIRCLE_BUILD_NUM\",\n        \"CIRCLE_JOB\",\n        \"DRONE_REPO_BRANCH\",\n        \"DRONE_BUILD_NUMBER\",\n        \"BUILD_NUMBER\",\n        \"JOB_NUMBER\",\n        \"JOB_NAME\",\n        \"TRAVIS_BUILD_ID\",\n        \"TRAVIS_BUILD_NUMBER\",\n        \"CI_PIPELINE_ID\",\n        \"CI_JOB_NAME\",\n        \"BITBUCKET_BRANCH\",\n        \"BITBUCKET_BUILD_NUMBER\",\n    ]:\n        if k in os.environ:\n            envs[k] = os.environ[k]\n            del os.environ[k]\n\n    for env, exp in [\n        ({\"DRONE_BUILD_NUMBER\": \"123\"}, \"{}\"),\n        ({\"DRONE_BUILD_NUMBER\": \"123\", \"DRONE_REPO_BRANCH\": \"test\"}, run_description),\n        ({\"DRONE_REPO_BRANCH\": \"test\"}, \"{}\"),\n    ]:\n        if \"DRONE_REPO_BRANCH\" in os.environ:\n            del os.environ[\"DRONE_REPO_BRANCH\"]\n        if \"DRONE_BUILD_NUMBER\" in os.environ:\n            del os.environ[\"DRONE_BUILD_NUMBER\"]\n\n        for k, v in env.items():\n            os.environ[k] = v\n\n        result = testdir.runpytest(\"-v\")\n        check_that(result, match=exp)\n\n    if \"DRONE_REPO_BRANCH\" in os.environ:\n        del os.environ[\"DRONE_REPO_BRANCH\"]\n    if \"DRONE_BUILD_NUMBER\" in os.environ:\n        del os.environ[\"DRONE_BUILD_NUMBER\"]\n\ndef test_monitor_bitbucket_ci(testdir):\n    
\"\"\"Make sure that pytest-monitor correctly handle Bitbucket CI information.\"\"\"\n    # create a temporary pytest test module\n    testdir.makepyfile(\n        \"\"\"\n    import time\n\n\n    def test_ok():\n        time.sleep(0.5)\n        x = ['a' * i for i in range(100)]\n        assert len(x) == 100\n\n\"\"\"\n    )\n\n    def check_that(the_result, match):\n        # fnmatch_lines does an assertion internally\n        the_result.stdout.fnmatch_lines([\"*::test_ok PASSED*\"])\n\n        pymon_path = pathlib.Path(str(testdir)) / \".pymon\"\n        assert pymon_path.exists()\n\n        # make sure that that we get a '0' exit code for the testsuite\n        the_result.assert_outcomes(passed=1)\n\n        db = sqlite3.connect(str(pymon_path))\n        cursor = db.cursor()\n        cursor.execute(\"SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;\")\n        desc = cursor.fetchall()\n        assert len(desc) == 1  # current test\n        assert desc[0][0] == match\n        pymon_path.unlink()\n\n    run_description = '{\"pipeline_branch\": \"test\", \"pipeline_build_no\": \"123\", \"__ci__\": \"bitbucketci\"}'\n    envs = {}\n    for k in [\n        \"CIRCLE_BUILD_NUM\",\n        \"CIRCLE_JOB\",\n        \"DRONE_REPO_BRANCH\",\n        \"DRONE_BUILD_NUMBER\",\n        \"BUILD_NUMBER\",\n        \"JOB_NUMBER\",\n        \"JOB_NAME\",\n        \"TRAVIS_BUILD_ID\",\n        \"TRAVIS_BUILD_NUMBER\",\n        \"CI_PIPELINE_ID\",\n        \"CI_JOB_NAME\",\n        \"BITBUCKET_BRANCH\",\n        \"BITBUCKET_BUILD_NUMBER\",\n    ]:\n        if k in os.environ:\n            envs[k] = os.environ[k]\n            del os.environ[k]\n\n    for env, exp in [\n        ({\"BITBUCKET_BUILD_NUMBER\": \"123\"}, \"{}\"),\n        ({\"BITBUCKET_BUILD_NUMBER\": \"123\", \"BITBUCKET_BRANCH\": \"test\"}, run_description),\n        ({\"BITBUCKET_BRANCH\": \"test\"}, \"{}\"),\n    ]:\n        if \"BITBUCKET_BRANCH\" in os.environ:\n            del os.environ[\"BITBUCKET_BRANCH\"]\n        if \"BITBUCKET_BUILD_NUMBER\" in os.environ:\n            del os.environ[\"BITBUCKET_BUILD_NUMBER\"]\n\n        for k, v in env.items():\n            os.environ[k] = v\n\n        result = testdir.runpytest(\"-v\")\n        check_that(result, match=exp)\n\n    if \"BITBUCKET_BRANCH\" in os.environ:\n        del os.environ[\"BITBUCKET_BRANCH\"]\n    if \"BITBUCKET_BUILD_NUMBER\" in os.environ:\n        del os.environ[\"BITBUCKET_BUILD_NUMBER\"]\n"
  },
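  {
    "path": "examples/ci_detection.py",
    "content": "# Editor-added illustrative sketch; this file is NOT part of pytest-monitor.\n# collect_ci_info() in pytest_monitor/sys_utils.py inspects well-known CI\n# environment variables (Jenkins, CircleCI, Travis, Drone, Gitlab, Bitbucket)\n# and returns an empty dict outside CI; Jenkins variables take precedence.\nimport os\n\nfrom pytest_monitor.sys_utils import collect_ci_info\n\nos.environ[\"CI_PIPELINE_ID\"] = \"123\"\nos.environ[\"CI_JOB_NAME\"] = \"test\"\nprint(collect_ci_info())\n# {'pipeline_branch': 'test', 'pipeline_build_no': '123', '__ci__': 'gitlabci'}\n"
  },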
  {
    "path": "tox.ini",
    "content": "# For more information about tox, see https://tox.readthedocs.io/en/latest/\n[tox]\nenvlist = py27,py34,py35,py36,py37,pypy,flake8\n\n[testenv]\ndeps = pytest>=3.0\ncommands = pytest {posargs:tests}\n\n[testenv:flake8]\nskip_install = true\ndeps = flake8\ncommands = flake8 pytest_monitor.py setup.py tests\n"
  }
]