Repository: CFMTech/pytest-monitor
Branch: master
Commit: 30585e4fae0d
Files: 53
Total size: 156.2 KB
Directory structure:
gitextract_2sbi1wsi/
├── .circleci/
│ └── config.yml
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug_report.md
│ │ └── feature_request.md
│ └── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── .gitlab-ci.yml
├── .pre-commit-config.yaml
├── .readthedocs.yml
├── AUTHORS
├── CONTRIBUTING.rst
├── LICENSE
├── MANIFEST.in
├── README.rst
├── docs/
│ ├── env.yml
│ ├── requirements.txt
│ └── sources/
│ ├── Makefile
│ ├── changelog.rst
│ ├── conf.py
│ ├── configuration.rst
│ ├── contributing.rst
│ ├── index.rst
│ ├── installation.rst
│ ├── introduction.rst
│ ├── make.bat
│ ├── operating.rst
│ ├── remote.rst
│ └── run.rst
├── examples/
│ ├── pkg1/
│ │ ├── __init__.py
│ │ ├── test_mod1.py
│ │ └── test_mod2.py
│ ├── pkg2/
│ │ ├── __init__.py
│ │ └── test_mod_a.py
│ ├── pkg3/
│ │ ├── __init__.py
│ │ └── test_mod_cl.py
│ ├── pkg4/
│ │ ├── __init__.py
│ │ └── test_mod_a.py
│ └── pkg5/
│ ├── __init__.py
│ ├── doctest.py
│ └── test_special_pytest.py
├── pyproject.toml
├── pytest_monitor/
│ ├── __init__.py
│ ├── handler.py
│ ├── pytest_monitor.py
│ ├── session.py
│ └── sys_utils.py
├── requirements.dev.txt
├── requirements.txt
├── tests/
│ ├── conftest.py
│ ├── test_monitor.py
│ ├── test_monitor_component.py
│ ├── test_monitor_context.py
│ └── test_monitor_in_ci.py
└── tox.ini
================================================
FILE CONTENTS
================================================
================================================
FILE: .circleci/config.yml
================================================
version: 2.1
aliases:
docker-image: &image
- image: mambaorg/micromamba
filter-pr-only: &PR-only
branches:
ignore:
- master
tags:
ignore:
- /.*/
filter-master-only: &master-only
branches:
only:
- master
filter-tags-only: &official-tag
branches:
ignore:
- /.*/
tags:
only:
- /^pytest-monitor-.*/
matrix: &build-matrix
parameters:
python: [ "3.8", "3.9", "3.10", "3.11" ]
pytest: [ "6.1", "7" ]
exclude:
- pytest: "6.1"
python: "3.11"
- pytest: "6.1"
python: "3.9"
- pytest: "6.1"
python: "3.10"
commands:
make-env:
description: "Create a brand new environment"
parameters:
python:
type: string
default: "3"
description: "Python version to use for building"
pytest:
type: string
default: "7"
description: "Pytest version to use for testing"
use_specific_requirements_file:
type: string
default: "requirements.txt"
description: "Add specific requirements listed in a file to the environment. "
extra_deps:
type: string
default: ""
description: "Extra dependencies to install (given as a space separated string)"
channels:
type: string
default: "https://conda.anaconda.org/conda-forge"
description: "List of channels for fetching packages"
publish_mode:
type: boolean
default: false
description: "If true, does not pin versions in requirements.txt"
steps:
- when:
condition:
not: << parameters.publish_mode >>
steps:
- checkout
- run:
name: "Apply dependency constraints"
command: |
if [ "<< parameters.pytest >>" != "" ]; then
sed -i 's/^pytest/pytest=<< parameters.pytest >>/g' << parameters.use_specific_requirements_file >>
fi
echo "" >> << parameters.use_specific_requirements_file >>
if [ "<< parameters.extra_deps >>" != "" ]; then
for dep in << parameters.extra_deps >>
do
echo $dep >> << parameters.use_specific_requirements_file >>
done
fi
- run:
name: "Create environment"
command: |
micromamba create -n project
channels=$(echo << parameters.channels >> | sed "s/ / -c /g")
requirements=$(cat << parameters.use_specific_requirements_file >> | tr '\n' ' ')
micromamba install -n project -y python=<< parameters.python >> pip $requirements -c $channels
- run:
name: "Install project in environment"
command: |
eval "$(micromamba shell hook --shell=bash)"
micromamba activate project
python -m pip install -e .
- run:
name: "Dumping env"
command: |
micromamba env export --name project --explicit > manifest.txt
- store_artifacts:
path: manifest.txt
- when:
condition: << parameters.publish_mode >>
steps:
- checkout
- run:
name: "Create environment"
command: |
micromamba create -n project
channels=$(echo << parameters.channels >> | sed "s/ / -c /g")
requirements=$(cat requirements.txt | tr '\n' ' ')
micromamba install -n project -y python=<< parameters.python >> $requirements -c $channels
micromamba install -n project -y << parameters.extra_deps >> -c $channels
lint-project:
description: "Check code style"
steps:
- run:
name: "Check formatting (black)"
command: |
eval "$(micromamba shell hook --shell=bash)"
micromamba activate project
black --check .
- run:
name: "Check code style (flake8)"
command: |
eval "$(micromamba shell hook --shell=bash)"
micromamba activate project
flake8 .
- run:
name: "Check import order (isort)"
command: |
eval "$(micromamba shell hook --shell=bash)"
micromamba activate project
isort --check-only .
test-project:
description: "Run all the test and store the results"
parameters:
runner:
type: string
default: "pytest"
description: "Test executor"
params:
type: string
default: "-v"
description: "Test executor parameters"
steps:
- run:
name: "Launch test"
command: |
eval "$(micromamba shell hook --shell=bash)"
micromamba activate project
mkdir test-results
<< parameters.runner >> << parameters.params >> --junit-xml=test-results/junit.xml
- store_test_results:
path: test-results/junit.xml
- store_artifacts:
path: test-results/junit.xml
inject-pypi:
description: "Inject pypi credentials"
steps:
- run:
name: "Setup Pypi"
command: |
echo -e "[pypi]" >> ~/.pypirc
echo -e "username = __token__" >> ~/.pypirc
echo -e "password = $PYPI_PASSWORD" >> ~/.pypirc
package-project:
description: "Package project"
steps:
- run:
name: "Make Packages"
command: |
eval "$(micromamba shell hook --shell=bash)"
micromamba activate project
python -m build
publish-project:
description: "Send sdist and wheels to Pypi"
steps:
- run:
name: "Publish"
command: |
eval "$(micromamba shell hook --shell=bash)"
micromamba activate project
twine upload dist/*
# Workflow definition
workflows:
PR:
jobs:
- lint
- build:
matrix: *build-matrix
name: "build-py<< matrix.python >>-pytest << matrix.pytest >>"
filters: *PR-only
requires:
- lint
deploy:
jobs:
- publish:
filters: *official-tag
nightly:
triggers:
- schedule:
cron: "0 0 * * *"
filters: *master-only
jobs:
- build:
python: "3"
pytest: "7"
jobs:
lint:
docker: *image
steps:
- make-env:
use_specific_requirements_file: requirements.dev.txt
- lint-project
build:
docker: *image
parameters:
python:
type: string
pytest:
type: string
steps:
- make-env:
extra_deps: mock
python: << parameters.python >>
pytest: << parameters.pytest >>
- test-project
publish:
docker: *image
steps:
- make-env:
extra_deps: twine setuptools build
channels: https://conda.anaconda.org/conda-forge defaults anaconda
publish_mode: true
- inject-pypi
- package-project
- publish-project
================================================
FILE: .github/ISSUE_TEMPLATE/bug_report.md
================================================
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. Linux, macOS, Windows]
- Python version: [e.g. 3.9.7]
- Pytest version: [e.g. 6.2.3]
- pytest-monitor version: [e.g. 1.6.2]
**Additional context**
Add any other context about the problem here.
================================================
FILE: .github/ISSUE_TEMPLATE/feature_request.md
================================================
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
================================================
FILE: .github/PULL_REQUEST_TEMPLATE.md
================================================
<!--
:tada: Thanks for submitting a PR to `pytest-monitor` :tada:
This template is here to guide you with your submission. Please fill it in thoroughly, but delete any section that seems irrelevant. (It's OK to leave unticked boxes for draft PRs or until details are cleared up).
Do not hesitate to use the full extent of [Markdown formatting][markdown_formatting] to make your submission clearer and more explicit. You can see a preview of how the text renders by switching to the *Preview* tab just above this panel.
[markdown_formatting]: https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet
If all is clear, you can also delete this paragraph!
-->
# Description
<!--
Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change. (e.g. is a specific `pytest` version required ?)
-->
Fixes #(issue)
# Type of change
<!--
Please delete options that are not relevant.
-->
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update
# Checklist:
<!--
If an option is not relevant to your PR, do not delete it but use ~strikethrough formatting on it~. This helps keep track of the entire list.
-->
- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes (not just the [CI](https://link.to.ci))
- [ ] Any dependent changes have been merged and published in downstream modules
- [ ] I have provided a link to the issue this PR addresses in the Description section above (If there is none yet,
[create one](https://github.com/CFMTech/pytest-monitor/issues) !)
- [ ] I have updated the [changelog](https://github.com/CFMTech/pytest-monitor/blob/master/docs/sources/changelog.rst)
- [ ] I have labeled my PR using appropriate tags (in particular using status labels like [`Status: Code Review Needed`](https://github.com/jsd-spif/pymonitor/labels/Status%3A%20Code%20Review%20Needed), [`Business: Test Needed`](https://github.com/jsd-spif/pymonitor/labels/Business%3A%20Test%20Needed) or [`Status: In Progress`](https://github.com/jsd-spif/pymonitor/labels/Status%3A%20In%20Progress) if you are still working on the PR)
Do not forget to @ the people that need to do the review
<!--
Thanks for contributing! :pray:
-->
================================================
FILE: .gitignore
================================================
**/.pymon
.idea/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
.pytest_cache
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask instance folder
instance/
# Sphinx documentation
docs/sources/_build/
# MkDocs documentation
/site/
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
# pyenv
.python-version
================================================
FILE: .gitlab-ci.yml
================================================
image: continuumio/miniconda
stages:
- test
- deploy
before_script:
- conda create -q -n pymon -y python=3.6
- conda install -q -n pymon psutil memory_profiler pytest -c https://conda.anaconda.org/conda-forge -c defaults -c anaconda -y
- source activate pymon
- python -m pip install -e .
- mkdir -p build/public
- mkdir public
pymon_run_test:
stage: test
script:
- pytest
pages:
stage: deploy
except:
- branches
script:
- conda install --file docs/requirements.txt -c defaults -c conda-forge -c anaconda -c pkgs/main -y
- cd docs/sources/ && make html && cd -
- mv docs/sources/_build/html/* public/
artifacts:
paths:
- public/
expire_in: 1 year
================================================
FILE: .pre-commit-config.yaml
================================================
repos:
- repo: local
hooks:
- id: black
name: black
entry: black
language: system
pass_filenames: true
types: [python]
- id: flake8
name: flake8
entry: flake8 --max-line-length=120
language: system
pass_filenames: true
types: [python]
- id: isort
name: isort
entry: isort
language: system
pass_filenames: true
types: [python]
================================================
FILE: .readthedocs.yml
================================================
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
# Build documentation in the docs/ directory with Sphinx
sphinx:
configuration: docs/sources/conf.py
# Optionally build your docs in additional formats such as PDF and ePub
formats: all
conda:
environment: docs/env.yml
================================================
FILE: AUTHORS
================================================
Project developed and led by Jean-Sébastien Dieu.
Contributors include:
- Raymond Gauthier (jraygauthier) added Python 3.5 support.
- Kyle Altendorf (altendky) fixed bugs on session teardown.
- Hannes Engelhardt (veritogen) added Bitbucket CI support.
================================================
FILE: CONTRIBUTING.rst
================================================
=============================
Contribution, getting started
=============================
Contributions are highly welcomed and appreciated. Every little help counts,
so do not hesitate!
.. contents::
:depth: 2
:backlinks: none
Create your own development environment
---------------------------------------
We use conda as our main packaging system, though pip works as well. Nevertheless,
the following instructions describe how to make your development environment using conda.
#. Create a new environment::

       conda create -n pytest-monitor-dev python=3 -c https://conda.anaconda.org/conda-forge -c defaults

#. Install the dependencies::

       conda install --file requirements.txt -n pytest-monitor-dev -c https://conda.anaconda.org/conda-forge -c defaults

#. Activate your environment::

       conda activate pytest-monitor-dev

#. Install pytest-monitor in development mode::

       python -m pip install -e .
#. You're done!
.. _submitfeedback:
Feature requests and feedback
-----------------------------
We'd like to hear about your propositions and suggestions. Feel free to
`submit them as issues <https://github.com/CFMTech/pytest-monitor/issues>`_ and:
* Explain in detail how they should work.
* Keep the scope as narrow as possible. This will make it easier to implement.
.. _reportbugs:
Report bugs
-----------
Report bugs for pytest-monitor in the issue tracker. Every filed bug should include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting, specifically:
* the Python interpreter version
* installed libraries
* and pytest version.
* Detailed steps to reproduce the bug.
.. _fixbugs:
Fix bugs
--------
Look through the `GitHub issues for bugs <https://github.com/CFMTech/pytest-monitor>`_.
:ref:`Talk <contact>` to developers to find out how you can fix specific bugs.
Implement features
------------------
Look through the `GitHub issues for enhancements <https://github.com/CFMTech/pytest-monitor/labels/type:%20enhancement>`_.
:ref:`Talk <contact>` to developers to find out how you can implement specific
features.
.. _`pull requests`:
.. _pull-requests:
Preparing Pull Requests
-----------------------
Short version
~~~~~~~~~~~~~
#. Fork the repository.
#. Enable and install `pre-commit <https://pre-commit.com>`_ to ensure style-guides and code checks are followed.
#. Target ``master`` for bugfixes and doc changes.
#. Target ``features`` for new features or functionality changes.
#. Follow **PEP-8** for naming and `black <https://github.com/psf/black>`_ for formatting.
#. Tests are run using ``tox``::

       tox -e linting,py37
The test environments above are usually enough to cover most cases locally.
#. Write a ``changelog`` entry: ``changelog/2574.bugfix.rst``, use issue id number
and one of ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or
``trivial`` for the issue type.
#. Unless your change is trivial or a documentation fix (e.g., a typo or reword of a small section) please
add yourself to the ``AUTHORS`` file, in alphabetical order.
Long version
~~~~~~~~~~~~
What is a "pull request"? It informs the project's core developers about the
changes you want to review and merge. Pull requests are stored on
`GitHub servers <https://github.com/CFMTech/pytest-monitor/pulls>`_.
Once you send a pull request, we can discuss its potential modifications and
even add more commits to it later on. There's an excellent tutorial on how Pull
Requests work in the
`GitHub Help Center <https://help.github.com/articles/using-pull-requests/>`_.
Here is a simple overview, with pytest-specific bits:
#. Fork the
`pytest GitHub repository <https://github.com/CFMTech/pytest-monitor>`__. It's
fine to use ``pytest`` as your fork repository name because it will live
under your user.
#. Clone your fork locally using `git <https://git-scm.com/>`_ and create a branch::
$ git clone git@github.com:YOUR_GITHUB_USERNAME/pytest.git
$ cd pytest
# now, to fix a bug create your own branch off "master":
$ git checkout -b fix/your-bugfix-branch-name master
# or to instead add a feature create your own branch off "master":
$ git checkout -b feature/your-feature-branch-name master
Given we have "major.minor.micro" version numbers, bugfixes will usually
be released in micro releases whereas features will be released in
minor releases and incompatible changes in major releases.
If you need some help with Git, follow this quick start
guide: https://git.wiki.kernel.org/index.php/QuickStart
#. Install `pre-commit <https://pre-commit.com>`_ and its hook on the pytest repo:
**Note: pre-commit must be installed as admin, as it will not function otherwise**::
$ pip install --user pre-commit
$ pre-commit install
Afterwards ``pre-commit`` will run whenever you commit.
https://pre-commit.com/ is a framework for managing and maintaining multi-language pre-commit hooks
to ensure code-style and code formatting is consistent.
#. Install tox
Tox is used to run all the tests and will automatically setup virtualenvs
to run the tests in.
(will implicitly use http://www.virtualenv.org/en/latest/)::
$ pip install tox
#. Run all the tests
You need to have Python 3.7 available in your system. Now
running tests is as simple as issuing this command::
$ tox -e linting,py37
This command will run tests via the "tox" tool against Python 3.7
and also perform "lint" coding-style checks.
#. You can now edit your local working copy and run the tests again as necessary. Please follow PEP-8 for naming.
You can pass different options to ``tox``. For example, to run tests on Python 3.7 and pass options to pytest
(e.g. enter pdb on failure) you can do::
$ tox -e py37 -- --pdb
Or to only run tests in a particular test module on Python 3.7::
$ tox -e py37 -- testing/test_config.py
When committing, ``pre-commit`` will re-format the files if necessary.
#. If instead of using ``tox`` you prefer to run the tests directly, then we suggest to create a virtual environment and use
an editable install with the ``testing`` extra::
$ python3 -m venv .venv
$ source .venv/bin/activate # Linux
$ .venv/Scripts/activate.bat # Windows
$ pip install -e ".[testing]"
Afterwards, you can edit the files and run pytest normally::
$ pytest testing/test_config.py
#. Commit and push once your tests pass and you are happy with your change(s)::
$ git commit -a -m "<commit message>"
$ git push -u
#. Create a new changelog entry in ``changelog``. The file should be named ``<issueid>.<type>.rst``,
where *issueid* is the number of the issue related to the change and *type* is one of
``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or ``trivial``. You may not create a
changelog entry if the change doesn't affect the documented behaviour of Pytest.
#. Add yourself to ``AUTHORS`` file if not there yet, in alphabetical order.
#. Finally, submit a pull request through the GitHub website using this data::
head-fork: YOUR_GITHUB_USERNAME/pytest
compare: your-branch-name
base-fork: pytest-dev/pytest
base: master # if it's a bugfix
base: features # if it's a feature
================================================
FILE: LICENSE
================================================
The MIT License (MIT)
Copyright (c) 2020 Capital Fund Management
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
================================================
FILE: MANIFEST.in
================================================
include LICENSE
include README.rst
recursive-exclude * __pycache__
recursive-exclude * *.py[co]
================================================
FILE: README.rst
================================================
.. image:: docs/sources/_static/pytestmonitor_readme.png
:width: 160
:align: center
:alt: Pytest-Monitor
------
==============
pytest-monitor
==============
.. image:: https://readthedocs.org/projects/pytest-monitor/badge/?version=latest
:target: https://pytest-monitor.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://img.shields.io/pypi/v/pytest-monitor.svg
:target: https://pypi.org/project/pytest-monitor
:alt: PyPI version
.. image:: https://img.shields.io/pypi/pyversions/pytest-monitor.svg
:target: https://pypi.org/project/pytest-monitor
:alt: Python versions
.. image:: https://circleci.com/gh/CFMTech/pytest-monitor/tree/master.svg?style=shield&circle-token=054adaaf6a19f4f55a4f0ad419649f1807e70ea9
:target: https://circleci.com/gh/CFMTech/pytest-monitor/tree/master
:alt: See Build Status on Circle CI
.. image:: https://anaconda.org/conda-forge/pytest-monitor/badges/platforms.svg
:target: https://anaconda.org/conda-forge/pytest-monitor
.. image:: https://anaconda.org/conda-forge/pytest-monitor/badges/version.svg
:target: https://anaconda.org/conda-forge/pytest-monitor
.. image:: https://img.shields.io/badge/License-MIT-blue.svg
:target: https://opensource.org/licenses/MIT
Pytest-monitor is a pytest plugin designed for analyzing resource usage.
----
Features
--------
- Analyze your resource consumption through test functions:
* memory consumption
* time duration
* CPU usage
- Keep a history of your resource consumption measurements.
- Compare how your code behaves between different environments.
Usage
-----
Simply run *pytest* as usual: *pytest-monitor* is active by default as soon as it is installed.
After running your first session, a `.pymon` SQLite database will be available in the directory where pytest was run.
Example of information collected for the execution context:
+----------------------------------+-----------+-------------------+----------+------------------------------------------+--------------+-----------------+--------------+--------------+-------------------------------+---------------------------------------------------+
| ENV_H                            | CPU_COUNT | CPU_FREQUENCY_MHZ | CPU_TYPE | CPU_VENDOR                               | RAM_TOTAL_MB | MACHINE_NODE    | MACHINE_TYPE | MACHINE_ARCH | SYSTEM_INFO                   | PYTHON_INFO                                       |
+==================================+===========+===================+==========+==========================================+==============+=================+==============+==============+===============================+===================================================+
| 8294b1326007d9f4c8a1680f9590c23d | 36        | 3000              | x86_64   | Intel(R) Xeon(R) Gold 6154 CPU @ 3.00GHz | 772249       | some.host.vm.fr | x86_64       | 64bit        | Linux - 3.10.0-693.el7.x86_64 | 3.6.8 (default, Jun 28 2019, 11:09:04) \n[GCC ... |
+----------------------------------+-----------+-------------------+----------+------------------------------------------+--------------+-----------------+--------------+--------------+-------------------------------+---------------------------------------------------+
Here is an example of collected data stored in the result database:
+----------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+-----------+------------+-----------+-------------+------------+-----------+
| RUN_DATE                   | ENV_H                            | SCM_ID                                   | ITEM_START_TIME            | ITEM                                   | KIND     | COMPONENT | TOTAL_TIME | USER_TIME | KERNEL_TIME | CPU_USAGE  | MEM_USAGE |
+============================+==================================+==========================================+============================+========================================+==========+===========+============+===========+=============+============+===========+
| 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:36.890477 | pkg1.test_mod1/test_sleep1             | function | None      | 1.005669   | 0.54      | 0.06        | 0.596618   | 1.781250  |
+----------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+-----------+------------+-----------+-------------+------------+-----------+
| 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:39.912029 | pkg1.test_mod1/test_heavy[10-10]       | function | None      | 0.029627   | 0.55      | 0.08        | 21.264498  | 1.781250  |
+----------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+-----------+------------+-----------+-------------+------------+-----------+
| 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:39.948922 | pkg1.test_mod1/test_heavy[100-100]     | function | None      | 0.028262   | 0.56      | 0.09        | 22.998773  | 1.781250  |
+----------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+-----------+------------+-----------+-------------+------------+-----------+
| 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:39.983869 | pkg1.test_mod1/test_heavy[1000-1000]   | function | None      | 0.030131   | 0.56      | 0.10        | 21.904277  | 2.132812  |
+----------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+-----------+------------+-----------+-------------+------------+-----------+
| 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:40.020823 | pkg1.test_mod1/test_heavy[10000-10000] | function | None      | 0.060060   | 0.57      | 0.14        | 11.821601  | 41.292969 |
+----------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+-----------+------------+-----------+-------------+------------+-----------+
| 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:40.093490 | pkg1.test_mod2/test_sleep_400ms        | function | None      | 0.404860   | 0.58      | 0.15        | 1.803093   | 2.320312  |
+----------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+-----------+------------+-----------+-------------+------------+-----------+
| 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:40.510525 | pkg2.test_mod_a/test_master_sleep      | function | None      | 5.006039   | 5.57      | 0.15        | 1.142620   | 2.320312  |
+----------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+-----------+------------+-----------+-------------+------------+-----------+
| 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:45.530780 | pkg3.test_mod_cl/test_method1          | function | None      | 0.030505   | 5.58      | 0.16        | 188.164762 | 2.320312  |
+----------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+-----------+------------+-----------+-------------+------------+-----------+
| 2020-02-17T09:11:36.731233 | 8294b1326007d9f4c8a1680f9590c23d | de23e6bdb987ae21e84e6c7c0357488ee66f2639 | 2020-02-17T09:11:50.582954 | pkg4.test_mod_a/test_force_monitor     | function | test      | 1.005015   | 11.57     | 0.17        | 11.681416  | 2.320312  |
+----------------------------+----------------------------------+------------------------------------------+----------------------------+----------------------------------------+----------+-----------+------------+-----------+-------------+------------+-----------+
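Since the results live in a plain SQLite file, you can explore them directly with Python's
built-in ``sqlite3`` module. The snippet below is a minimal sketch: the column names come from the
table above, while the ``TEST_METRICS`` table name is an assumption based on recent versions of the
plugin (inspect your `.pymon` file if your schema differs)::

    import sqlite3

    # Open the database produced by the last pytest run.
    db = sqlite3.connect(".pymon")
    # Fetch the five most time-consuming tests.
    query = (
        "SELECT ITEM, TOTAL_TIME, MEM_USAGE FROM TEST_METRICS "
        "ORDER BY TOTAL_TIME DESC LIMIT 5"
    )
    for item, total_time, mem_usage in db.execute(query):
        print(f"{item}: {total_time:.3f}s, {mem_usage:.1f} MB")
    db.close()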
Documentation
-------------
Full documentation is `available <https://pytest-monitor.readthedocs.io/en/latest/?badge=latest>`_.
Installation
------------
You can install *pytest-monitor* via *conda* (through the `conda-forge` channel)::
$ conda install pytest-monitor -c https://conda.anaconda.org/conda-forge
Another possibility is to install *pytest-monitor* via `pip`_ from `PyPI`_::
$ pip install pytest-monitor
Requirements
------------
You will need a valid Python 3.5+ interpreter. To get measures, we rely on:
- *psutil* to extract CPU usage
- *memory_profiler* to collect memory usage
- and *pytest* (obviously!)
**Note: this plugin doesn't work with unittest**
Storage backends
----------------
By default, pytest-monitor stores its results in a local SQLite3 database, making them readily accessible.
If you need a more powerful way to analyze your results, check out the
`monitor-server-api`_, which provides both a REST API for storing and historizing your results and an API for querying your data.
An alternative service (using MongoDB) can be used thanks to a contribution from @dremdem: `pytest-monitor-backend`_.
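If you rely on such a remote server, measures can be sent to it with the ``--remote-server`` option
(named ``--remote`` before release 1.5.1). The invocation below is only an illustration with a
hypothetical address; see the remote documentation for the exact format::

    $ pytest --remote-server http://my.monitor.server:8050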
Contributing
------------
Contributions are very welcome. Tests can be run with `tox`_. Before submitting a pull request, please ensure
that:
* both internal tests and examples are passing.
* internal tests have been written if necessary.
* if your contribution provides a new feature, make sure to provide an example and update the documentation accordingly.
License
-------
This code is distributed under the `MIT`_ license. *pytest-monitor* is free, open-source software.
Issues
------
If you encounter any problem, please `file an issue`_ along with a detailed description.
Author
------
The main author of `pytest-monitor` is Jean-Sébastien Dieu, who can be reached at jdieu@salsify.fr.
----
This `pytest`_ plugin was generated with `Cookiecutter`_ along with `@hackebrot`_'s `cookiecutter-pytest-plugin`_ template.
.. _`Cookiecutter`: https://github.com/audreyr/cookiecutter
.. _`@hackebrot`: https://github.com/hackebrot
.. _`MIT`: http://opensource.org/licenses/MIT
.. _`BSD-3`: http://opensource.org/licenses/BSD-3-Clause
.. _`GNU GPL v3.0`: http://www.gnu.org/licenses/gpl-3.0.txt
.. _`Apache Software License 2.0`: http://www.apache.org/licenses/LICENSE-2.0
.. _`cookiecutter-pytest-plugin`: https://github.com/pytest-dev/cookiecutter-pytest-plugin
.. _`file an issue`: https://github.com/CFMTech/pytest-monitor/issues
.. _`pytest`: https://github.com/pytest-dev/pytest
.. _`tox`: https://tox.readthedocs.io/en/latest/
.. _`pip`: https://pypi.org/project/pip/
.. _`PyPI`: https://pypi.org/project
.. _`monitor-server-api`: https://github.com/CFMTech/monitor-server-api
.. _`pytest-monitor-backend`: https://github.com/dremdem/pytest-monitor-backend
================================================
FILE: docs/env.yml
================================================
name: docenv
channels:
- anaconda
dependencies:
- python==3.7
- pip:
- alabaster==0.7.12
- asn1crypto==1.3.0
- Babel==2.8.0
- certifi==2019.11.28
- cffi==1.13.2
- chardet==3.0.4
- cryptography==2.8
- docutils==0.16
- idna==2.8
- imagesize==1.2.0
- Jinja2==2.11.1
- lz4==3.0.2
- MarkupSafe==1.1.1
- packaging==20.1
- pycparser==2.19
- Pygments==2.5.2
- pyOpenSSL==19.1.0
- pyparsing==2.4.6
- PySocks==1.7.1
- pytz==2019.3
- releases==1.6.3
- requests==2.22.0
- semantic-version==2.6.0
- six==1.14.0
- snowballstemmer==2.0.0
- Sphinx==2.3.1
- sphinx-rtd-theme==0.4.3
- sphinxcontrib-applehelp==1.0.1
- sphinxcontrib-devhelp==1.0.1
- sphinxcontrib-htmlhelp==1.0.2
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.2
- sphinxcontrib-serializinghtml==1.1.3
- urllib3==1.25.8
================================================
FILE: docs/requirements.txt
================================================
alabaster
babel
sphinx
sphinx-releases
sphinx_rtd_theme
semantic_version==2.6.*
make
pygraphviz
================================================
FILE: docs/sources/Makefile
================================================
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " applehelp to make an Apple Help Book"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
@echo " coverage to run coverage check of the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pytest-cookiecutterplugin_name.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pytest-cookiecutterplugin_name.qhc"
applehelp:
$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
@echo
@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
@echo "N.B. You won't be able to view it unless you put it in" \
"~/Library/Documentation/Help or install it in your application" \
"bundle."
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/pytest-cookiecutterplugin_name"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pytest-cookiecutterplugin_name"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
coverage:
$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
@echo "Testing of coverage in the sources finished, look at the " \
"results in $(BUILDDIR)/coverage/python.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
================================================
FILE: docs/sources/changelog.rst
================================================
=========
Changelog
=========
* :release:`to be discussed`
* :feature:`#75` Automatically gather CI build information for Bitbucket CI.
* :release:`1.6.6 <2023-05-06>`
* :bug:`#64` Prepare version 1.7.0 of pytest-monitor. Last version to support Python <= 3.7 and all pytest <= 5.*
* :bug:`#0` Improve and fix some CI issues, notably one that may cause python to not be the requested one but a more recent one.
* :release:`1.6.5 <2022-10-16>`
* :bug:`#60` Make sure that when psutil cannot fetch cpu frequency, the fallback mechanism is used.
* :release:`1.6.4 <2022-05-18>`
* :bug:`#56` Force the CPU frequency to 0 and emit a warning when unable to fetch it from the system.
* :bug:`#54` Fix a bug that crashed the monitor upon non-ASCII characters in the commit log under Perforce. Improved P4 change number extraction.
* :release:`1.6.3 <2021-12-22>`
* :bug:`#50` Fix a bug where a skipping fixture resulted in an exception during teardown.
* :release:`1.6.2 <2021-08-24>`
* :bug:`#40` Fix a bug that caused the garbage collector to be disabled by default.
* :release:`1.6.1 <2021-08-23>`
* :bug:`#43` Fix a bug that prevented session tags from being sent correctly.
* :bug:`#40` Force garbage collector to run between tests (better result accuracy)
* :release:`1.6.0 <2021-04-16>`
* :feature:`#0` Support for python 3.5
* :feature:`#35` Better support for Doctest item.
* :feature:`#24` Prefer JSON data type for storing session extended information instead of plain text.
* :release:`1.5.1 <2021-02-05>`
* :bug:`#31` Rename option --remote to --remote-server, as it conflicted with some plugins.
* :bug:`#23` Fix requirements minimum version.
* :release:`1.5.0 <2020-11-20>`
* :feature:`25` Automatically gather CI build information (supported CI systems are Drone CI, Gitlab CI, Jenkins CI, Travis CI, Circle CI)
* :bug:`#23 major` psutil min requirement is now 5.1.0
* :bug:`#28 major` Fix a bug that caused output to be printed multiple times
* :release:`1.4.0 <2020-06-04>`
* :feature:`21` Using json format to populate the RUN_DESCRIPTION field (through --description and --tag fields)
* :release:`1.3.0 <2020-05-12>`
* :feature:`19` Normalized HTTP codes used for sending metrics to a remote server.
* :release:`1.2.0 <2020-04-17>`
* :feature:`13` Change default analysis scope to function.
* :bug:`12 major` No execution contexts pushed when using a remote server.
* :bug:`14 major` A local database is always created even with --no-db option passed.
* :release:`1.1.1 <2020-03-31>`
* :bug:`9` Fix remote server interface for sending measures.
* :release:`1.1.0 <2020-03-30>`
* :feature:`5` Extend item information and separate item from its variants.
* :feature:`3` Compute user time and kernel time on a per test basis for clarity and ease of exploitation.
* :feature:`4` Added an option to add a description to a pytest run
* :release:`1.0.1 <2020-03-18>`
* :bug:`2` Fix a bug where pytest-monitor hung indefinitely when a pytest outcome (skip, fail...) was issued.
* :release:`1.0.0 <2020-02-20>`
* :feature:`0` Initial release
================================================
FILE: docs/sources/conf.py
================================================
# -*- coding: utf-8 -*-
#
# pytest-monitor documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 1 00:43:18 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import pathlib
def read_version():
init = pathlib.Path(__file__).parent.parent.parent / "pytest_monitor" / "__init__.py"
with init.open("r") as pkg_init_f:
version_read = [line.strip() for line in pkg_init_f if line.startswith("__version__")]
if len(version_read) > 1:
raise ValueError('Multiple versions found in "pytest_monitor" package!')
if not version_read:
raise ValueError('No version found in "pytest_monitor" package!')
return version_read[0].split("=", 1)[1].strip("\" '")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.ifconfig",
"sphinx.ext.todo",
"sphinx.ext.graphviz",
"releases",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pytest-monitor"
copyright = "2019, Jean-Sébastien Dieu" # noqa A001
author = "Jean-Sébastien Dieu"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = read_version()
# The full version, including alpha/beta/rc tags.
release = f"pytest-monitor v{version}"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
todo_emit_warnings = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/pytestmonitor_alpha.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "pytestmonitor-doc"
================================================
FILE: docs/sources/configuration.rst
================================================
========================
Configuring your session
========================
`pytest-monitor` gives you flexibility for running your test suite.
In this section, we will discuss the different available options, and how they influence the `pytest` session.
Scope Restriction
-----------------
`pytest-monitor` is able to restrict the scope of the analysis. By default,
only test functions discovered by pytest are monitored.
Sometimes, you might want to monitor a whole module or test session. This can be
achieved thanks to the *\-\-restrict-scope-to* option.
If a scope restriction is set, then the monitoring will be performed at the selected levels.
For example, monitoring at both function and module level can be achieved by the following command:
.. code-block:: shell
pytest --restrict-scope-to function,module
Accepted values are:
* function: test functions will be monitored individually, leading to one entry per test function.
* module: each discovered module will be monitored regardless of the others.
* class: test class objects will be monitored individually.
* session: monitor the whole session.
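For instance, to record one entry for the whole session in addition to the per-function entries, combine two of the values above:

.. code-block:: shell

    pytest --restrict-scope-to function,session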
It is important to realize that using multiple scopes has an impact on the monitoring measures. For example, the `pytest-monitor` code that monitors functions does consume resources for each function (notably compute time). As a consequence, the resources consumed by their module will include the resources consumed by `pytest-monitor` for each function. If individual functions were not monitored, the resource consumption reported for the module would therefore be lower.
Due to the way `pytest` handles test modules, some specificities apply when monitoring modules:
* The total measured elapsed time includes the setup/teardown process for each function.
On the other hand, a function object measures only the duration of the function run (without the setup and teardown parts).
* Consumed memory will be the peak of memory usage during the whole module run.
Handling parameterized tests
----------------------------
Parameterized tests can be introspected by `pytest-monitor` during the setup phase: their real
name is derived from the parameter values, using each parameter's string representation (so make sure that this representation suits your needs).
Let's consider the following test:
.. code-block:: python
@pytest.mark.parametrize(('asint', 'asstr'), [(10, "10"), (100, "100"), (1000, "1000"), (10000, "10000")])
def test_p(asint, asstr):
assert asint == int(asstr)
By default, `pytest-monitor` will generate the following entries:
* test_p[10-10]
* test_p[100-100]
* test_p[1000-1000]
* test_p[10000-10000]
You can ask `pytest-monitor` to tag parameters with their names (as provided by ``@pytest.mark.parametrize``), with the following option:
.. code-block:: shell
pytest --parametrization-explicit
which will lead to the following entries:
* test_p[asint_10-asstr_10]
* test_p[asint_100-asstr_100]
* test_p[asint_1000-asstr_1000]
* test_p[asint_10000-asstr_10000]
Disable monitoring
------------------
If you need for some reason to disable the monitoring, pass the *\-\-no-monitor* option.
Describing a run
----------------
Sometimes, you might want to compare runs performed on an identical state of your code. In such cases, relying only on the scm
reference and the run date of the session is not sufficient. To help with this, `pytest-monitor` allows you to describe
your session using a description and tags.
Description and tags
~~~~~~~~~~~~~~~~~~~~
The description should provide a brief summary of your run, while tags can be used to
record specific information you want to focus on during your analysis.
Setting a description is as simple as this:
.. code-block:: shell
bash $> pytest --description "Any run description you want"
Flagging your session with specific information is no more complex than setting a description:
.. code-block:: shell
bash $> pytest --tag pandas=1.0.1 --tag numpy=1.17
This will result in a session with the following description:
.. code-block:: text
{
"pandas": "1.0.1",
"numpy": "1.17"
}
You can of course combine both options to fully describe your session:
.. code-block:: shell
bash $> pytest --tag pandas=1.0.1 --tag numpy=1.17 --description "Your summary"
This will result in a session with the following description:
.. code-block:: text
{
"msg": "Your summary",
"pandas": "1.0.1",
"numpy": "1.17"
}
Describing a CI build
~~~~~~~~~~~~~~~~~~~~~
For convenience, pytest-monitor automatically extends the session's description with some information
extracted from the CI build. For that purpose, pytest-monitor reads the environment
at the start of the test session in search of:
* **pipeline_branch**, which can either represent a CI pipeline name (preferentially) or the source code branch name.
* **pipeline_build_no**, which is the pipeline build number (if available) or the pipeline ID if any.
* **__ci__**, which identifies the CI system used.
Currently, pytest-monitor supports the following CI systems:
* Gitlab CI
* Travis CI
* Jenkins
* Drone CI
* Circle CI
* Bitbucket CI
The following table explains how these fields are mapped:
+--------------+-----------------------------------+-----------------------+---------------+
| CI | pipeline_branch | pipeline_build_no | __ci__ |
+==============+===================================+=======================+===============+
| Jenkins CI | BRANCH_NAME if set else JOB_NAME | BUILD_NUMBER | jenkinsci |
+--------------+-----------------------------------+-----------------------+---------------+
| Drone CI | DRONE_REPO_BRANCH | DRONE_BUILD_NUMBER | droneci |
+--------------+-----------------------------------+-----------------------+---------------+
| Circle CI | CIRCLE_JOB | CIRCLE_BUILD_NUM | circleci |
+--------------+-----------------------------------+-----------------------+---------------+
| Gitlab CI | CI_JOB_NAME | CI_PIPELINE_ID | gitlabci |
+--------------+-----------------------------------+-----------------------+---------------+
| Travis CI | TRAVIS_BUILD_ID | TRAVIS_BUILD_NUMBER | travisci |
+--------------+-----------------------------------+-----------------------+---------------+
| Bitbucket CI | BITBUCKET_BRANCH                  | BITBUCKET_BUILD_NUMBER| bitbucketci   |
+--------------+-----------------------------------+-----------------------+---------------+
Note that none of these fields will be added if:
* the CI context is incomplete
* the CI context cannot be computed.
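For instance, you can reproduce the Jenkins mapping locally by exporting the corresponding environment variables before invoking `pytest` (a minimal illustration; the values are placeholders):

.. code-block:: shell

    bash $> BRANCH_NAME=master BUILD_NUMBER=42 pytest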
Parameters affecting measures
-----------------------------
By default, pytest-monitor runs the garbage collector prior to executing each test function.
This leads to finer memory measurements. If you want to disable this call to the
garbage collector, just set the `--no-gc` option on the command line:
.. code-block:: shell
bash $> pytest --no-gc
Forcing CPU frequency
---------------------
Under some circumstances, you may want to set the CPU frequency instead of asking `pytest-monitor` to compute it.
To do so, you can either:
- ask `pytest-monitor` to use a preset value if it does not manage to compute the CPU frequency,
- or tell it not to try computing the CPU frequency at all and to use your preset value instead.
Two environment variables control this behaviour:
- `PYTEST_MONITOR_CPU_FREQ` allows you to preset a value for the CPU frequency. It must be convertible to a float.
  This value will be used if `pytest-monitor` cannot compute the CPU frequency; otherwise, `0.0` is used as the
  default value.
- `PYTEST_MONITOR_FORCE_CPU_FREQ` instructs `pytest-monitor` whether to try computing the CPU frequency at all. It
  expects a value convertible to an integer. If it is not set, or if its integer value is `0`, then `pytest-monitor`
  will try to compute the CPU frequency, falling back to the behaviour described for the previous environment
  variable. If it is set and not equal to `0`, then the value held by `PYTEST_MONITOR_CPU_FREQ` is used directly
  (`0.0` if not set).
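For example, to skip the computation entirely and force a preset frequency (the value below is just a placeholder):

.. code-block:: shell

    bash $> PYTEST_MONITOR_FORCE_CPU_FREQ=1 PYTEST_MONITOR_CPU_FREQ=2500.0 pytest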
================================================
FILE: docs/sources/contributing.rst
================================================
==================
Contribution guide
==================
If you want to contribute to this project, you are welcome to do so!
Create your own development environment
---------------------------------------
We use conda as our main packaging system, though pip works as well.
The following instructions describe how to create your development environment using conda:
#. Create a new environment:
.. code-block:: bash
conda create -n pytest-monitor-dev python=3 -c https://conda.anaconda.org/conda-forge -c defaults
#. Install the dependencies:
.. code-block:: bash
conda install --file requirements.dev.txt -n pytest-monitor-dev -c https://conda.anaconda.org/conda-forge -c defaults
#. Make sure pip is installed, or install it if missing:
.. code-block:: bash
# Check for pip
conda list | grep pip
# Install if needed
conda install -n pytest-monitor-dev pip -c https://conda.anaconda.org/conda-forge
#. Activate your environment:
.. code-block:: bash
conda activate pytest-monitor-dev
#. Install `pytest-monitor` in development mode:
.. code-block:: bash
python -m pip install -e ".[dev]"
#. Install the pre-commit hooks
.. code-block:: bash
pre-commit install
#. You're done!
Feature requests and feedback
-----------------------------
We would be happy to hear about your propositions and suggestions. Feel free to
`submit them as issues <https://github.com/CFMTech/pytest-monitor/issues>`_ and:
* Explain in detail the expected behavior.
* Keep the scope as narrow as possible. This will make them easier to implement.
.. _reportbugs:
Bug reporting
-------------
Report bugs for `pytest-monitor` in the `issue tracker <https://github.com/CFMTech/pytest-monitor/issues>`_. Every filed bug should include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting, specifically:
* the Python interpreter version,
* installed libraries,
* and your `pytest` version.
* Detailed steps to reproduce the bug.
.. _fixbugs:
Bug fixing
----------
Look through the `GitHub issues for bugs <https://github.com/CFMTech/pytest-monitor/issues>`_.
Talk to developers to find out how you can fix specific bugs.
Feature implementation
----------------------
Look through the `GitHub issues for enhancements <https://github.com/CFMTech/pytest-monitor/labels/type:%20enhancement>`_.
Talk to developers to find out how you can implement specific features.
Thank you!
================================================
FILE: docs/sources/index.rst
================================================
.. pytest-monitor documentation master file, created by
sphinx-quickstart on Thu Oct 1 00:43:18 2015.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to pytest-monitor's documentation!
===============================================================
Contents:
.. toctree::
:maxdepth: 2
introduction
installation
configuration
run
operating
remote
contributing
changelog
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
================================================
FILE: docs/sources/installation.rst
================================================
============
Installation
============
`pytest-monitor` is a plugin for `pytest`.
Supported environments
----------------------
`pytest-monitor` currently works on *Linux* and *macOS*. Support for *Windows* is experimental and not tested.
**You will need pytest 4.4+ to run pytest-monitor.**
We support all versions of Python >= 3.8.
From conda
----------
Simply run the following command to get it installed in your current environment:
.. code-block:: bash
conda install pytest-monitor -c https://conda.anaconda.org/conda-forge
From pip
--------
Simply run the following command to get it installed:
.. code-block:: bash
pip install pytest-monitor
================================================
FILE: docs/sources/introduction.rst
================================================
============
Introduction
============
`pytest-monitor` tracks the resources (like memory and compute time) consumed by a test suite, so that you
can make sure that your code does not use too much of them.
Thanks to `pytest-monitor`, you can check resource consumption, in particular through continuous integration, by monitoring the consumption of test functions. These tests can be functional (as usual) or dedicated to resource consumption checks.
Use cases
---------
Examples of use cases include technical stack updates, and code evolutions.
Technical stack updates
~~~~~~~~~~~~~~~~~~~~~~~
In the Python world, libraries often depend on several packages. By updating some (or all) of the dependencies,
you update code that you do not own and therefore do not control. Tracking your application's resource footprint
can prevent unwanted resource consumption, and can thus validate the versions of the packages that you depend on.
Code evolution
~~~~~~~~~~~~~~
Extending your application with new features, or fixing its bugs, might have an impact on the core of your program. The performance of large applications or libraries can be difficult to assess, but by monitoring resource consumption, `pytest-monitor` allows you to check that despite code updates, the performance of your code remains within desirable limits.
Usage
-----
Simply run pytest as usual: pytest-monitor is active by default as soon as it is installed. After running your first session, a .pymon sqlite database will be accessible in the directory where pytest was run.
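For instance, assuming the `sqlite3` command-line tool is available, you can list the tables created after a run:

.. code-block:: shell

    bash $> pytest
    bash $> sqlite3 .pymon ".tables"
    EXECUTION_CONTEXTS  TEST_METRICS  TEST_SESSIONS

These tables are detailed in the section about operating measures.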
================================================
FILE: docs/sources/make.bat
================================================
@ECHO OFF
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
set I18NSPHINXOPTS=%SPHINXOPTS% .
if NOT "%PAPER%" == "" (
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)
if "%1" == "" goto help
if "%1" == "help" (
:help
echo.Please use `make ^<target^>` where ^<target^> is one of
echo. html to make standalone HTML files
echo. dirhtml to make HTML files named index.html in directories
echo. singlehtml to make a single large HTML file
echo. pickle to make pickle files
echo. json to make JSON files
echo. htmlhelp to make HTML files and a HTML help project
echo. qthelp to make HTML files and a qthelp project
echo. devhelp to make HTML files and a Devhelp project
echo. epub to make an epub
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
echo. text to make text files
echo. man to make manual pages
echo. texinfo to make Texinfo files
echo. gettext to make PO message catalogs
echo. changes to make an overview over all changed/added/deprecated items
echo. xml to make Docutils-native XML files
echo. pseudoxml to make pseudoxml-XML files for display purposes
echo. linkcheck to check all external links for integrity
echo. doctest to run all doctests embedded in the documentation if enabled
echo. coverage to run coverage check of the documentation if enabled
goto end
)
if "%1" == "clean" (
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
del /q /s %BUILDDIR%\*
goto end
)
REM Check if sphinx-build is available and fallback to Python version if any
%SPHINXBUILD% 2> nul
if errorlevel 9009 goto sphinx_python
goto sphinx_ok
:sphinx_python
set SPHINXBUILD=python -m sphinx.__init__
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
:sphinx_ok
if "%1" == "html" (
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
goto end
)
if "%1" == "dirhtml" (
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
goto end
)
if "%1" == "singlehtml" (
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
goto end
)
if "%1" == "pickle" (
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the pickle files.
goto end
)
if "%1" == "json" (
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the JSON files.
goto end
)
if "%1" == "htmlhelp" (
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
goto end
)
if "%1" == "qthelp" (
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\pytest-cookiecutterplugin_name.qhcp
echo.To view the help file:
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\pytest-cookiecutterplugin_name.ghc
goto end
)
if "%1" == "devhelp" (
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished.
goto end
)
if "%1" == "epub" (
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The epub file is in %BUILDDIR%/epub.
goto end
)
if "%1" == "latex" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
if errorlevel 1 exit /b 1
echo.
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdf" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf
cd %~dp0
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdfja" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf-ja
cd %~dp0
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "text" (
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The text files are in %BUILDDIR%/text.
goto end
)
if "%1" == "man" (
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The manual pages are in %BUILDDIR%/man.
goto end
)
if "%1" == "texinfo" (
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
goto end
)
if "%1" == "gettext" (
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
goto end
)
if "%1" == "changes" (
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
if errorlevel 1 exit /b 1
echo.
echo.The overview file is in %BUILDDIR%/changes.
goto end
)
if "%1" == "linkcheck" (
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
if errorlevel 1 exit /b 1
echo.
echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
goto end
)
if "%1" == "doctest" (
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
if errorlevel 1 exit /b 1
echo.
echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
goto end
)
if "%1" == "coverage" (
%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
if errorlevel 1 exit /b 1
echo.
echo.Testing of coverage in the sources finished, look at the ^
results in %BUILDDIR%/coverage/python.txt.
goto end
)
if "%1" == "xml" (
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The XML files are in %BUILDDIR%/xml.
goto end
)
if "%1" == "pseudoxml" (
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
goto end
)
:end
================================================
FILE: docs/sources/operating.rst
================================================
==================
Operating measures
==================
Storage
-------
Once measures are collected, `pytest-monitor` dumps them either in a local database
or sends them to a monitor server.
In the case of local storage, a `sqlite3` database is used, as it is lightweight and
is provided with many Python distributions (being part of the standard library).
Measures are stored in the `pytest` invocation directory, in a database file named **.pymon**.
You are free to override the name of this database by setting the `--db` option:
.. code-block:: shell
pytest --db /path/to/your/monitor/database
You can also send your test results to a monitor server (under development at the time of writing) in order to centralize
your Metrics and Execution Contexts (see below):
.. code-block:: shell
pytest --remote-server server:port
Execution Context, Metrics and Session
--------------------------------------
We distinguish two kinds of measures:
- those related to the **Execution Context**. This is related to your machine (node name, CPU, memory…),
- the **Metrics** related to the tests themselves (this can be the memory used, the CPU usage…).
Among test-related **Metrics**, some pieces of information do not depend on the test itself but on the
session (session start date, scm reference). For this reason, `pytest-monitor` uses
a notion of session to which each test is linked.
Additionally, each test is linked to an Execution Context, so that comparisons between runs are possible.
Model
-----
The local database associates each test Metrics to the specific context in which it was run:
.. image:: _static/db_relationship.png
Execution Context
~~~~~~~~~~~~~~~~~
Execution Contexts are computed prior to the start of the `pytest`
session. An Execution Context describes much of the machine settings:
CPU_COUNT (integer)
Number of online CPUs the machine can use.
CPU_FREQUENCY_MHZ (integer)
Base frequency of the CPUs (in megahertz). Set to 0 if unable to fetch it.
CPU_TYPE (TEXT 64 CHAR)
    Processor type, as reported by the platform module.
CPU_VENDOR (TEXT 256 CHAR)
    Full CPU vendor string.
RAM_TOTAL_MB (INTEGER)
Total usable RAM (physical memory) in megabytes.
MACHINE_NODE (TEXT 512 CHAR)
Fully qualified domain name of the machine.
MACHINE_TYPE (TEXT 32 CHAR)
Machine type.
MACHINE_ARCH (TEXT 16 CHAR)
Mode used (64 bits…).
SYSTEM_INFO (TEXT 256 CHAR)
Operating system name and release level.
PYTHON_INFO (TEXT 512 CHAR)
Python information (version, compilation mode used and so on…)
ENV_H (TEXT 64 CHAR)
Hash string used to uniquely identify an execution context.
In the local database, Execution Contexts are stored in table `EXECUTION_CONTEXTS`.
Sessions
~~~~~~~~
SESSION_H (TEXT 64 CHAR)
Hash string used to uniquely identify a session run.
RUN_DATE (TEXT 64 CHAR)
Time at which the `pytest` session was started. The full format is
    'YYYY-MM-DDTHH:MM:SS.uuuuuu' (ISO 8601 format). The fractional second part is omitted if it is zero.
SCM_ID (TEXT 128 CHAR)
Full reference to the source code management system if any.
RUN_DESCRIPTION (TEXT 1024 CHAR)
A free text field that you can use to describe a session run.
In the local database, Sessions are stored under the table `TEST_SESSIONS`.
Metrics
~~~~~~~
Metrics are collected at test, class and/or module level. For both classes and modules, some of the
metrics can be skewed due to the technical limitations described earlier.
SESSION_H (TEXT 64 CHAR)
Session context used for this test.
ENV_H (TEXT 64 CHAR)
Execution Context used for this test.
ITEM_START_TIME (TEXT 64 CHAR)
Time at which the item test was launched. The full format is
    'YYYY-MM-DDTHH:MM:SS.uuuuuu' (ISO 8601 format). The fractional second part is omitted if it is zero.
ITEM_PATH (TEXT 4096 CHAR)
Path of the item, using an import compatible string specification.
ITEM (TEXT 2048 CHAR)
Initial item name, without any variant.
ITEM_VARIANT varchar(2048)
Full item name, with parametrization used if any.
ITEM_FS_LOC varchar(2048)
Item's module path relative to pytest invocation directory.
KIND (TEXT 64 CHAR)
Type of item (function, class, module…).
COMPONENT (TEXT 512 CHAR), NULLABLE
Component to which the test belongs, if any (this is used when sending results to a server, for identifying each source of Metrics).
TOTAL_TIME (FLOAT)
Total time spent running the item (in seconds).
USER_TIME (FLOAT)
Time spent in User mode (in seconds).
KERNEL_TIME (FLOAT)
Time spent in Kernel mode (in seconds).
CPU_USAGE (FLOAT)
    CPU usage during the item run, computed as the ratio of CPU time (user + kernel) to wall-clock time (1.0 is equivalent to one fully used core).
MEM_USAGE (FLOAT)
Maximum resident memory used during the test execution (in megabytes).
In the local database, these Metrics are stored in table `TEST_METRICS`.
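As an illustration, the following minimal sketch (assuming the default **.pymon** database in the current directory) joins the three tables described above to list the most time-consuming items:

.. code-block:: python

    import sqlite3

    cnx = sqlite3.connect(".pymon")  # default database created by pytest-monitor
    query = """
        SELECT m.ITEM, m.TOTAL_TIME, m.MEM_USAGE, s.RUN_DATE, c.MACHINE_NODE
        FROM TEST_METRICS AS m
        JOIN TEST_SESSIONS AS s ON m.SESSION_H = s.SESSION_H
        JOIN EXECUTION_CONTEXTS AS c ON m.ENV_H = c.ENV_H
        ORDER BY m.TOTAL_TIME DESC
    """
    for item, total_time, mem, run_date, node in cnx.execute(query):
        print(f"{item}: {total_time:.2f}s, {mem:.1f} MB ({run_date} on {node})")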
================================================
FILE: docs/sources/remote.rst
================================================
Use of a remote server
======================
You can easily send your metrics to a remote server. This can prove useful when it comes to running
tests in parallel with plugins such as *pytest-xdist* or *pytest-parallel*.
To do so, instruct pytest with the remote server address to use:
.. code-block:: shell
bash $> pytest --remote-server myremote.server.net:port
This way, *pytest-monitor* will automatically send data to and query the remote server whenever
needed. Note that *pytest-monitor* will revert to its normal behaviour if:
- it cannot query the context or the session for existence
- it cannot create a new context or a new session
Implementing a remote server
============================
How pytest-monitor interacts with a remote server
-------------------------------------------------
The following sequence is used by *pytest-monitor* when using a remote server:
1. Ask the remote server if the **Execution Context** is known.
2. Insert the **Execution Context** if the server knows nothing about it.
3. Ask the remote server if the **Session** is known.
4. Insert the **Session** if the server knows nothing about it.
5. Insert results once measures have been collected.
Used HTTP codes
---------------
Two codes are used by *pytest-monitor* when asked to work with a remote server:
- 200 (OK) is used to indicate that a query has led to a non-empty result.
- 201 (CREATED) is expected by *pytest-monitor* when sending a new entry (**Execution Context**, **Session** or any **Metrics**).
- 204 (NO CONTENT), though not checked explicitly, should be returned when a request leads to no results.
Mandatory routes
----------------
The following routes are expected to be reachable:
GET /contexts/<str:hash>
    Query the system for an **Execution Context** with the given hash.
**Return Codes**: Must return *200* (*OK*) if the **Execution Context** exists, *204* (*NO CONTENT*) otherwise
GET /sessions/<str:hash>
Query the system for a **Session** with the given hash.
**Return Codes**: Must return *200* (*OK*) if the **Session** exists, *204* (*NO CONTENT*) otherwise
POST /contexts/
Request the system to create a new entry for the given **Execution Context**.
    Data are sent as JSON parameters:
.. code-block:: json
{
cpu_count: int,
cpu_frequency: int,
cpu_type: str,
cpu_vendor: str,
        ram_total: int,
machine_node: str,
machine_type: str,
machine_arch: str,
system_info: str,
python_info: str,
h: str
}
**Return Codes**: Must return *201* (*CREATED*) if the **Execution Context** has been created
POST /sessions/
Request the system to create a new entry for the given **Session**.
    Data are sent as JSON parameters:
.. code-block:: json
{
session_h: str,
run_date: str,
scm_ref: str,
description: str
}
**Return Codes**: Must return *201* (*CREATED*) if the **Session** has been created
POST /metrics/
Request the system to create a new **Metrics** entry.
    Data are sent as JSON parameters:
.. code-block:: json
{
session_h: str,
context_h: str,
item_start_time: str,
item_path: str,
item: str,
item_variant: str,
item_fs_loc: str,
kind: str,
component: str,
total_time: float,
user_time: float,
kernel_time: float,
cpu_usage: float,
mem_usage: float
}
    **Return Codes**: Must return *201* (*CREATED*) if the **Metrics** entry has been created
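To make this contract concrete, here is a minimal sketch of such a server written with Flask (Flask is used here purely for illustration and is not a pytest-monitor dependency; storage is kept in memory):

.. code-block:: python

    from http import HTTPStatus

    from flask import Flask, jsonify, request

    app = Flask(__name__)
    contexts, sessions, metrics = {}, {}, []  # naive in-memory storage

    @app.route("/contexts/<string:ctx_hash>", methods=["GET"])
    def get_context(ctx_hash):
        if ctx_hash in contexts:
            # pytest-monitor reads the matching hashes under the "contexts" key.
            return jsonify(contexts=[contexts[ctx_hash]]), HTTPStatus.OK
        return "", HTTPStatus.NO_CONTENT

    @app.route("/sessions/<string:session_hash>", methods=["GET"])
    def get_session(session_hash):
        if session_hash in sessions:
            return jsonify(sessions=[sessions[session_hash]]), HTTPStatus.OK
        return "", HTTPStatus.NO_CONTENT

    @app.route("/contexts/", methods=["POST"])
    def post_context():
        data = request.get_json()
        contexts[data["h"]] = data
        return jsonify(h=data["h"]), HTTPStatus.CREATED

    @app.route("/sessions/", methods=["POST"])
    def post_session():
        data = request.get_json()
        sessions[data["session_h"]] = data
        return jsonify(session_h=data["session_h"]), HTTPStatus.CREATED

    @app.route("/metrics/", methods=["POST"])
    def post_metric():
        metrics.append(request.get_json())
        return "", HTTPStatus.CREATED

Once such a server listens on, say, port 8050, pointing *pytest-monitor* at it is simply a matter of running ``pytest --remote-server localhost:8050``.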
================================================
FILE: docs/sources/run.rst
================================================
========================
Managing your test suite
========================
`pytest-monitor` does not require any specific setup: it is active by default.
Thus, by default, all your tests are analyzed in order to collect monitoring information.
About collecting and storing results
------------------------------------
`pytest-monitor` makes a clear distinction between the execution context and the test metrics.
This distinction can be seen clearly in the code and the initialization sequence:
1. Collect environment values.
Various pieces of information about the machine are collected.
2. Store the context.
The Execution Context collected in step #1 is recorded if not yet known.
3. Prepare the run.
In order to provide more accurate measurements, we "warm up" the context and take an initial set of measurements.
Some will be used for adjusting later measurements.
4. Run tests and enable measurements.
Depending on the item type (function, class or module), we launch the relevant measurements.
Each time a monitored item ends, the measurement results (Metrics) are recorded right away.
5. End session.
If sending the monitoring results to a remote server has been requested, this is when `pytest-monitor` does it.
Selecting tests to monitor
--------------------------
By default, all tests are monitored, even small ones which would not require any specific monitoring.
It is possible to control more finely which tests will be monitored by `pytest-monitor`. This is done through the use of `pytest` markers.
`pytest-monitor` offers two markers for this:
``@pytest.mark.monitor_skip_test``
marks your test for execution, but without any monitoring.
``@pytest.mark.monitor_skip_test_if(cond)``
    tells `pytest-monitor` to execute the test but to skip monitoring
    if and only if the condition is true.
Here is an example:
.. code-block:: python
import pytest
import sys
def test_execute_and_monitor():
assert True
@pytest.mark.monitor_skip_test
def test_execute_do_not_monitor():
assert True
    @pytest.mark.monitor_skip_test_if(sys.version_info >= (3,))
    def test_execute_do_not_monitor_py3_or_above():
assert True
Disabling monitoring except for some tests
------------------------------------------
`pytest` offers global markers. For example, one can set the default to no monitoring:
.. code-block:: python
import pytest
# With the following global module marker,
# monitoring is disabled by default:
pytestmark = [pytest.mark.monitor_skip_test]
In this case, it is necessary to explicitly activate individual monitoring. This is
accomplished with:
``@pytest.mark.monitor_test``
marks your test as to be executed and monitored, even if monitoring
is disabled for the module.
``@pytest.mark.monitor_test_if(cond)``
tells `pytest-monitor` to execute the test and to monitor results
if and only if the condition is true, regardless of the
module monitor setup.
Continuing the example above:
.. code-block:: python
    import pytest
    import time
    import sys
def test_executed_not_monitored():
time.sleep(1)
assert True
def test_executed_not_monitored_2():
time.sleep(2)
assert True
@pytest.mark.monitor_test
def test_executed_and_monitored():
assert True
@pytest.mark.monitor_test_if(sys.version_info >= (3, 7))
def test_executed_and_monitored_if_py37():
assert True
Associating your tests to a component
-------------------------------------
`pytest-monitor` allows you to *tag* each test in the database with a "**component**" name. This allows you to easily identify tests that come from a specific part of your application, or to distinguish test results for two different projects that use the same `pytest-monitor` database.
Setting up a component name can be done at module level:
.. code-block:: python
import time
import pytest
pytest_monitor_component = "my_component" # Component name stored in the results database
def test_monitored():
t_a = time.time()
b_continue = True
while b_continue:
t_delta = time.time() - t_a
b_continue = t_delta < 1
assert not b_continue
If no `pytest_monitor_component` variable is defined, the component is set to the empty string.
In projects with many modules, this can be tedious. `pytest-monitor` therefore allows you to force a fixed component name for all the tests:
.. code-block:: bash
$ pytest --force-component YOUR_COMPONENT_NAME
This will force the component value to be set to the one you provided, whatever the value of
*pytest_monitor_component* in your test module, if any.
If you need to use a global component name for all your tests while allowing some modules to have a specific component name, you can ask `pytest-monitor` to add a prefix to any module-level component name:
.. code-block:: bash
$ pytest --component-prefix YOUR_COMPONENT_NAME
This way, all tests detected by `pytest` will have their component prefixed with the given value (tests for modules with no `pytest_monitor_component` variable are simply tagged with the prefix).
For instance the following test module:
.. code-block:: python
import time
import pytest
pytest_monitor_component = "component_A"
def test_monitored():
t_a = time.time()
b_continue = True
while b_continue:
t_delta = time.time() - t_a
b_continue = t_delta < 1
assert not b_continue
will yield the following value for the component fields, depending on the chosen command-line option:
+------------------------------------------+-----------------------+
| Command line used | Component value |
+==========================================+=======================+
| pytest --force-component PROJECT_A | PROJECT_A |
+------------------------------------------+-----------------------+
| pytest --component-prefix PROJECT_A | PROJECT_A.component_A |
+------------------------------------------+-----------------------+
================================================
FILE: examples/pkg1/__init__.py
================================================
================================================
FILE: examples/pkg1/test_mod1.py
================================================
import time
import pytest
def test_sleep1():
time.sleep(1)
@pytest.mark.monitor_skip_test()
def test_sleep2():
time.sleep(2)
@pytest.mark.parametrize(("range_max", "other"), [(10, "10"), (100, "100"), (1000, "1000"), (10000, "10000")])
def test_heavy(range_max, other):
assert len(["a" * i for i in range(range_max)]) == range_max
================================================
FILE: examples/pkg1/test_mod2.py
================================================
import time
def test_sleep_400ms():
time.sleep(0.4)
================================================
FILE: examples/pkg2/__init__.py
================================================
================================================
FILE: examples/pkg2/test_mod_a.py
================================================
import time
def test_master_sleep():
t_a = time.time()
b_continue = True
while b_continue:
t_delta = time.time() - t_a
b_continue = t_delta < 5
================================================
FILE: examples/pkg3/__init__.py
================================================
================================================
FILE: examples/pkg3/test_mod_cl.py
================================================
import time
class TestClass:
def setup_method(self, test_method):
self.__value = test_method.__name__
time.sleep(1)
def test_method1(self):
time.sleep(0.5)
assert self.__value == "test_method1"
================================================
FILE: examples/pkg4/__init__.py
================================================
================================================
FILE: examples/pkg4/test_mod_a.py
================================================
import time
import pytest
pytestmark = pytest.mark.monitor_skip_test
pytest_monitor_component = "test"
def test_not_monitored():
t_a = time.time()
b_continue = True
while b_continue:
t_delta = time.time() - t_a
b_continue = t_delta < 5
@pytest.mark.monitor_test()
def test_force_monitor():
t_a = time.time()
b_continue = True
while b_continue:
t_delta = time.time() - t_a
b_continue = t_delta < 5
================================================
FILE: examples/pkg5/__init__.py
================================================
================================================
FILE: examples/pkg5/doctest.py
================================================
def run(a, b):
"""
>>> a = 3
>>> b = 30
>>> run(a, b)
33
"""
return a + b
def try_doctest():
"""
>>> try_doctest()
33
"""
return run(3, 30)
================================================
FILE: examples/pkg5/test_special_pytest.py
================================================
import pytest
@pytest.mark.skip(reason="Some special test to skip")
def test_is_skipped():
assert True
def test_that_one_is_skipped_too():
pytest.skip("Test executed and instructed to be skipped from its body")
def test_import_or_skip():
pytest.importorskip("this_module_does_not_exists")
================================================
FILE: pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.distutils.bdist_wheel]
universal = false
[project]
name = "pytest-monitor"
authors = [
{name = "Jean-Sébastien Dieu", email = "dieu.jsebastien@yahoo.com"},
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Framework :: Pytest",
"Intended Audience :: Developers",
"Topic :: Software Development :: Testing",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
]
dependencies = [
"pytest",
"requests",
"psutil>=5.1.0",
"memory_profiler>=0.58",
"wheel",
]
description = "A pytest plugin designed for analyzing resource usage during tests."
license = {text = "MIT"}
maintainers = [
{name = "Jean-Sébastien Dieu", email = "dieu.jsebastien@yahoo.com"},
]
readme = "README.rst"
requires-python = ">=3.8"
version = "1.7.0"
[project.urls]
"Source" = "https://github.com/CFMTech/pytest-monitor"
"Tracker" = "https://github.com/CFMTech/pytest-monitor/issues"
"Documentation" = "https://pytest-monitor.readthedocs.io/"
"Homepage" = "https://pytest-monitor.readthedocs.io/"
[project.entry-points.pytest11]
monitor = "pytest_monitor.pytest_monitor"
[project.optional-dependencies]
dev = [
"black",
"isort",
"flake8==6.0.0",
"flake8-builtins==2.1.0",
"flake8-simplify==0.19.3",
"flake8-comprehensions==3.10.1",
"flake8-pytest-style==1.6.0",
"flake8-return==1.2.0",
"flake8-simplify==0.19.3",
"flake8-pyproject==1.2.3",
"pre-commit==3.3.3"
]
[tool.flake8]
max-line-length = 120
[tool.black]
line-length = 120
[tool.isort]
profile = "black"
src_paths = ["pytest_monitor"]
================================================
FILE: pytest_monitor/__init__.py
================================================
import importlib.metadata
__version__ = importlib.metadata.version("pytest-monitor")
================================================
FILE: pytest_monitor/handler.py
================================================
import sqlite3
class DBHandler:
def __init__(self, db_path):
self.__db = db_path
        self.__cnx = sqlite3.connect(self.__db) if db_path else None
        if self.__cnx is not None:
            self.prepare()
def query(self, what, bind_to, many=False):
cursor = self.__cnx.cursor()
cursor.execute(what, bind_to)
return cursor.fetchall() if many else cursor.fetchone()
def insert_session(self, h, run_date, scm_id, description):
with self.__cnx:
self.__cnx.execute(
"insert into TEST_SESSIONS(SESSION_H, RUN_DATE, SCM_ID, RUN_DESCRIPTION)" " values (?,?,?,?)",
(h, run_date, scm_id, description),
)
def insert_metric(
self,
session_id,
env_id,
item_start_date,
item,
item_path,
item_variant,
item_loc,
kind,
component,
total_time,
user_time,
kernel_time,
cpu_usage,
mem_usage,
):
with self.__cnx:
self.__cnx.execute(
"insert into TEST_METRICS(SESSION_H,ENV_H,ITEM_START_TIME,ITEM,"
"ITEM_PATH,ITEM_VARIANT,ITEM_FS_LOC,KIND,COMPONENT,TOTAL_TIME,"
"USER_TIME,KERNEL_TIME,CPU_USAGE,MEM_USAGE) "
"values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
(
session_id,
env_id,
item_start_date,
item,
item_path,
item_variant,
item_loc,
kind,
component,
total_time,
user_time,
kernel_time,
cpu_usage,
mem_usage,
),
)
def insert_execution_context(self, exc_context):
with self.__cnx:
self.__cnx.execute(
"insert into EXECUTION_CONTEXTS(CPU_COUNT,CPU_FREQUENCY_MHZ,CPU_TYPE,CPU_VENDOR,"
"RAM_TOTAL_MB,MACHINE_NODE,MACHINE_TYPE,MACHINE_ARCH,SYSTEM_INFO,"
"PYTHON_INFO,ENV_H) values (?,?,?,?,?,?,?,?,?,?,?)",
(
exc_context.cpu_count,
exc_context.cpu_frequency,
exc_context.cpu_type,
exc_context.cpu_vendor,
exc_context.ram_total,
exc_context.fqdn,
exc_context.machine,
exc_context.architecture,
exc_context.system_info,
exc_context.python_info,
exc_context.compute_hash(),
),
)
def prepare(self):
cursor = self.__cnx.cursor()
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS TEST_SESSIONS(
SESSION_H varchar(64) primary key not null unique, -- Session identifier
RUN_DATE varchar(64), -- Date of test run
SCM_ID varchar(128), -- SCM change id
RUN_DESCRIPTION json
);"""
)
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS TEST_METRICS (
SESSION_H varchar(64), -- Session identifier
ENV_H varchar(64), -- Environment description identifier
ITEM_START_TIME varchar(64), -- Effective start time of the test
ITEM_PATH varchar(4096), -- Path of the item, following Python import specification
ITEM varchar(2048), -- Name of the item
ITEM_VARIANT varchar(2048), -- Optional parametrization of an item.
ITEM_FS_LOC varchar(2048), -- Relative path from pytest invocation directory to the item's module.
KIND varchar(64), -- Package, Module or function
COMPONENT varchar(512) NULL, -- Tested component if any
TOTAL_TIME float, -- Total time spent running the item
USER_TIME float, -- time spent in user space
KERNEL_TIME float, -- time spent in kernel space
CPU_USAGE float, -- cpu usage
MEM_USAGE float, -- Max resident memory used.
FOREIGN KEY (ENV_H) REFERENCES EXECUTION_CONTEXTS(ENV_H),
FOREIGN KEY (SESSION_H) REFERENCES TEST_SESSIONS(SESSION_H)
);"""
)
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS EXECUTION_CONTEXTS (
ENV_H varchar(64) primary key not null unique,
CPU_COUNT integer,
CPU_FREQUENCY_MHZ integer,
CPU_TYPE varchar(64),
CPU_VENDOR varchar(256),
RAM_TOTAL_MB integer,
MACHINE_NODE varchar(512),
MACHINE_TYPE varchar(32),
MACHINE_ARCH varchar(16),
SYSTEM_INFO varchar(256),
PYTHON_INFO varchar(512)
);
"""
)
self.__cnx.commit()
================================================
FILE: pytest_monitor/pytest_monitor.py
================================================
# -*- coding: utf-8 -*-
import gc
import time
import warnings
import memory_profiler
import pytest
from pytest_monitor.session import PyTestMonitorSession
# These dictionaries are used to compute members set on each item.
# KEY is the marker set on a test function
# value is a tuple:
# expect_args: boolean
# internal marker attribute name: str
# callable that set member's value
# default value
PYTEST_MONITOR_VALID_MARKERS = {
"monitor_skip_test": (False, "monitor_skip_test", lambda x: True, False),
"monitor_skip_test_if": (True, "monitor_skip_test", lambda x: bool(x), False),
"monitor_test": (False, "monitor_force_test", lambda x: True, False),
"monitor_test_if": (True, "monitor_force_test", lambda x: bool(x), False),
}
PYTEST_MONITOR_DEPRECATED_MARKERS = {}
PYTEST_MONITOR_ITEM_LOC_MEMBER = "_location" if tuple(int(x) for x in pytest.__version__.split(".")[:2]) < (5, 3) else "location"
PYTEST_MONITORING_ENABLED = True
def pytest_addoption(parser):
group = parser.getgroup("monitor")
group.addoption(
"--restrict-scope-to",
dest="mtr_scope",
default="function",
        help="Select the scope to monitor. By default, only function is monitored."
        " Values are function, class, module, session. You can set one or more of these"
        " by listing them using a comma-separated list",
)
group.addoption(
"--parametrization-explicit",
dest="mtr_want_explicit_ids",
action="store_true",
help="Set this option to distinguish parametrized tests given their values."
" This requires the parameters to be stringifiable.",
)
group.addoption("--no-monitor", action="store_true", dest="mtr_none", help="Disable all traces")
group.addoption(
"--remote-server",
action="store",
dest="mtr_remote",
        help="Remote server to send the results to. Format is <ADDRESS>:<PORT>",
)
group.addoption(
"--db",
action="store",
dest="mtr_db_out",
default=".pymon",
help="Use the given sqlite database for storing results.",
)
group.addoption(
"--no-db",
action="store_true",
dest="mtr_no_db",
help="Do not store results in local db.",
)
group.addoption(
"--force-component",
action="store",
dest="mtr_force_component",
        help="Force the component to be set to the given value for all the tests run" " in this session.",
)
group.addoption(
"--component-prefix",
action="store",
dest="mtr_component_prefix",
        help="Prefix each component found with the given value (applies to all tests" " run in this session).",
)
group.addoption(
"--no-gc",
action="store_true",
dest="mtr_disable_gc",
        help="Disable garbage collection between tests (may lead to unreliable measures)",
)
group.addoption(
"--description",
action="store",
default="",
dest="mtr_description",
help="Use this option to provide a small summary about this run.",
)
group.addoption(
"--tag",
action="append",
dest="mtr_tags",
default=[],
        help="Provide meaningful flags to your run. This can help you in your analysis.",
)
def pytest_configure(config):
config.addinivalue_line("markers", "monitor_skip_test: mark test to be executed but not monitored.")
config.addinivalue_line(
"markers",
"monitor_skip_test_if(cond): mark test to be executed but " "not monitored if cond is verified.",
)
    config.addinivalue_line(
        "markers",
        "monitor_test: mark test to be monitored (default behaviour)."
        " This can come in handy to whitelist some tests when you have disabled"
        " monitoring on a whole module.",
)
config.addinivalue_line(
"markers",
"monitor_test_if(cond): mark test to be monitored if and only if cond"
" is verified. This can help you in whitelisting tests to be monitored"
" depending on some external conditions.",
)
def pytest_runtest_setup(item):
"""
    Validate marker setup and print warnings if usage of a deprecated marker is identified.
    Marker attributes are set on the discovered item once this verification is done.
:param item: Test item
"""
if not PYTEST_MONITORING_ENABLED:
return
item_markers = {mark.name: mark for mark in item.iter_markers() if mark and mark.name.startswith("monitor_")}
mark_to_del = []
for set_marker in item_markers.keys():
if set_marker not in PYTEST_MONITOR_VALID_MARKERS:
warnings.warn("Nothing known about marker {}. Marker will be dropped.".format(set_marker))
mark_to_del.append(set_marker)
if set_marker in PYTEST_MONITOR_DEPRECATED_MARKERS:
warnings.warn(f"Marker {set_marker} is deprecated. Consider upgrading your tests")
for marker in mark_to_del:
del item_markers[marker]
    all_valid_markers = dict(PYTEST_MONITOR_VALID_MARKERS)  # copy so the module-level dict is not mutated
    all_valid_markers.update(PYTEST_MONITOR_DEPRECATED_MARKERS)
# Setting instantiated markers
for marker, _ in item_markers.items():
with_args, attr, fun_val, _ = all_valid_markers[marker]
attr_val = fun_val(item_markers[marker].args[0]) if with_args else fun_val(None)
setattr(item, attr, attr_val)
# Setting other markers to default values
for marker, marker_value in all_valid_markers.items():
with_args, attr, _, default = marker_value
if not hasattr(item, attr):
setattr(item, attr, default)
# Finalize marker processing by enforcing some marker's value
if item.monitor_force_test:
# This test has been explicitly flagged as 'to be monitored'.
item.monitor_skip_test = False
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""
Used to identify the current call to add times.
:param item: Test item
:param call: call instance associated to the given item
"""
outcome = yield
rep = outcome.get_result()
if rep.when == "call":
setattr(item, "test_run_duration", call.stop - call.start)
setattr(item, "test_effective_start_time", call.start)
def pytest_runtest_call(item):
if not PYTEST_MONITORING_ENABLED:
return
setattr(item, "monitor_results", False)
if hasattr(item, "module"):
setattr(
item,
"monitor_component",
getattr(item.module, "pytest_monitor_component", ""),
)
else:
setattr(item, "monitor_skip_test", True)
@pytest.hookimpl
def pytest_pyfunc_call(pyfuncitem):
"""
Core sniffer logic. We encapsulate the test function in a sniffer function to collect
memory results.
"""
def wrapped_function():
try:
funcargs = pyfuncitem.funcargs
testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
pyfuncitem.obj(**testargs)
        except Exception:
            raise
        except BaseException as e:
            # pytest outcomes (skip(), importorskip()...) derive directly from
            # BaseException, not Exception: hand them back so memory profiling
            # can complete; prof() below re-raises them.
            return e
def prof():
m = memory_profiler.memory_usage((wrapped_function, ()), max_iterations=1, max_usage=True, retval=True)
if isinstance(m[1], BaseException): # Do we have any outcome?
raise m[1]
memuse = m[0][0] if type(m[0]) is list else m[0]
setattr(pyfuncitem, "mem_usage", memuse)
setattr(pyfuncitem, "monitor_results", True)
if not PYTEST_MONITORING_ENABLED:
wrapped_function()
else:
if not pyfuncitem.session.config.option.mtr_disable_gc:
gc.collect()
prof()
return True
def pytest_make_parametrize_id(config, val, argname):
if config.option.mtr_want_explicit_ids:
return f"{argname}={val}"
return None
@pytest.hookimpl(hookwrapper=True)
def pytest_sessionstart(session):
"""
Instantiate a monitor session to save collected metrics.
We yield at the end to let pytest pursue the execution.
"""
if session.config.option.mtr_force_component and session.config.option.mtr_component_prefix:
raise pytest.UsageError("Invalid usage: --force-component and --component-prefix are incompatible options!")
if session.config.option.mtr_no_db and not session.config.option.mtr_remote and not session.config.option.mtr_none:
warnings.warn("pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring.")
session.config.option.mtr_none = True
component = session.config.option.mtr_force_component or session.config.option.mtr_component_prefix
if session.config.option.mtr_component_prefix:
component += ".{user_component}"
if not component:
component = "{user_component}"
db = (
None
if (session.config.option.mtr_none or session.config.option.mtr_no_db)
else session.config.option.mtr_db_out
)
remote = None if session.config.option.mtr_none else session.config.option.mtr_remote
session.pytest_monitor = PyTestMonitorSession(
db=db, remote=remote, component=component, scope=session.config.option.mtr_scope
)
global PYTEST_MONITORING_ENABLED
PYTEST_MONITORING_ENABLED = not session.config.option.mtr_none
session.pytest_monitor.compute_info(session.config.option.mtr_description, session.config.option.mtr_tags)
yield
@pytest.fixture(autouse=True, scope="module")
def _prf_module_tracer(request):
if not PYTEST_MONITORING_ENABLED:
yield
else:
t_a = time.time()
ptimes_a = request.session.pytest_monitor.process.cpu_times()
yield
ptimes_b = request.session.pytest_monitor.process.cpu_times()
t_z = time.time()
rss = request.session.pytest_monitor.process.memory_info().rss / 1024**2
component = getattr(request.module, "pytest_monitor_component", "")
item = request.node.name[:-3]
pypath = request.module.__name__[: -len(item) - 1]
request.session.pytest_monitor.add_test_info(
item,
pypath,
"",
request.node._nodeid,
"module",
component,
t_a,
t_z - t_a,
ptimes_b.user - ptimes_a.user,
ptimes_b.system - ptimes_a.system,
rss,
)
@pytest.fixture(autouse=True)
def _prf_tracer(request):
if not PYTEST_MONITORING_ENABLED:
yield
else:
ptimes_a = request.session.pytest_monitor.process.cpu_times()
yield
ptimes_b = request.session.pytest_monitor.process.cpu_times()
if not request.node.monitor_skip_test and getattr(request.node, "monitor_results", False):
item_name = request.node.originalname or request.node.name
item_loc = getattr(request.node, PYTEST_MONITOR_ITEM_LOC_MEMBER)[0]
request.session.pytest_monitor.add_test_info(
item_name,
request.module.__name__,
request.node.name,
item_loc,
"function",
request.node.monitor_component,
request.node.test_effective_start_time,
request.node.test_run_duration,
ptimes_b.user - ptimes_a.user,
ptimes_b.system - ptimes_a.system,
request.node.mem_usage,
)
================================================
FILE: pytest_monitor/session.py
================================================
import datetime
import hashlib
import json
import os
import warnings
from http import HTTPStatus
import memory_profiler
import psutil
import requests
from pytest_monitor.handler import DBHandler
from pytest_monitor.sys_utils import (
ExecutionContext,
collect_ci_info,
determine_scm_revision,
)
class PyTestMonitorSession:
def __init__(self, db=None, remote=None, component="", scope=None, tracing=True):
self.__db = None
if db:
self.__db = DBHandler(db)
self.__monitor_enabled = tracing
self.__remote = remote
self.__component = component
self.__session = ""
self.__scope = scope or []
self.__eid = (None, None)
self.__mem_usage_base = None
self.__process = psutil.Process(os.getpid())
@property
def monitoring_enabled(self):
return self.__monitor_enabled
@property
def remote_env_id(self):
return self.__eid[1]
@property
def db_env_id(self):
return self.__eid[0]
@property
def process(self):
return self.__process
def get_env_id(self, env):
db, remote = None, None
if self.__db:
row = self.__db.query("SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?", (env.compute_hash(),))
db = row[0] if row else None
if self.__remote:
r = requests.get(f"{self.__remote}/contexts/{env.compute_hash()}")
remote = None
if r.status_code == HTTPStatus.OK:
remote = json.loads(r.text)
if remote["contexts"]:
remote = remote["contexts"][0]["h"]
else:
remote = None
return db, remote
def compute_info(self, description, tags):
run_date = datetime.datetime.now().isoformat()
scm = determine_scm_revision()
h = hashlib.md5()
h.update(scm.encode())
h.update(run_date.encode())
h.update(description.encode())
self.__session = h.hexdigest()
# From description + tags to JSON format
d = collect_ci_info()
if description:
d["description"] = description
for tag in tags:
if type(tag) is str:
_tag_info = tag.split("=", 1)
d[_tag_info[0]] = _tag_info[1]
else:
for sub_tag in tag:
_tag_info = sub_tag.split("=", 1)
d[_tag_info[0]] = _tag_info[1]
description = json.dumps(d)
# Now get memory usage base and create the database
self.prepare()
self.set_environment_info(ExecutionContext())
if self.__db:
self.__db.insert_session(self.__session, run_date, scm, description)
if self.__remote:
r = requests.post(
f"{self.__remote}/sessions/",
json={
"session_h": self.__session,
"run_date": run_date,
"scm_ref": scm,
"description": json.loads(description),
},
)
if r.status_code != HTTPStatus.CREATED:
self.__remote = ""
                msg = f"Cannot insert session in remote monitor server ({r.status_code})! Deactivating..."
warnings.warn(msg)
def set_environment_info(self, env):
self.__eid = self.get_env_id(env)
db_id, remote_id = self.__eid
if self.__db and db_id is None:
self.__db.insert_execution_context(env)
db_id = self.__db.query("select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?", (env.compute_hash(),))[0]
if self.__remote and remote_id is None:
# We must postpone that to be run at the end of the pytest session.
r = requests.post(f"{self.__remote}/contexts/", json=env.to_dict())
if r.status_code != HTTPStatus.CREATED:
                warnings.warn(f"Cannot insert execution context in remote server (rc={r.status_code})! Deactivating...")
self.__remote = ""
else:
remote_id = json.loads(r.text)["h"]
self.__eid = db_id, remote_id
def prepare(self):
def dummy():
return True
memuse = memory_profiler.memory_usage((dummy,), max_iterations=1, max_usage=True)
self.__mem_usage_base = memuse[0] if type(memuse) is list else memuse
def add_test_info(
self,
item,
item_path,
item_variant,
item_loc,
kind,
component,
item_start_time,
total_time,
user_time,
kernel_time,
mem_usage,
):
if kind not in self.__scope:
return
mem_usage = float(mem_usage) - self.__mem_usage_base
cpu_usage = (user_time + kernel_time) / total_time
item_start_time = datetime.datetime.fromtimestamp(item_start_time).isoformat()
final_component = self.__component.format(user_component=component)
if final_component.endswith("."):
final_component = final_component[:-1]
        item_variant = item_variant.replace("-", ", ")  # pytest joins parameter values with '-'; make them readable
if self.__db and self.db_env_id is not None:
self.__db.insert_metric(
self.__session,
self.db_env_id,
item_start_time,
item,
item_path,
item_variant,
item_loc,
kind,
final_component,
total_time,
user_time,
kernel_time,
cpu_usage,
mem_usage,
)
if self.__remote and self.remote_env_id is not None:
r = requests.post(
f"{self.__remote}/metrics/",
json={
"session_h": self.__session,
"context_h": self.remote_env_id,
"item_start_time": item_start_time,
"item_path": item_path,
"item": item,
"item_variant": item_variant,
"item_fs_loc": item_loc,
"kind": kind,
"component": final_component,
"total_time": total_time,
"user_time": user_time,
"kernel_time": kernel_time,
"cpu_usage": cpu_usage,
"mem_usage": mem_usage,
},
)
if r.status_code != HTTPStatus.CREATED:
self.__remote = ""
                msg = f"Cannot insert values in remote monitor server ({r.status_code})! Deactivating..."
warnings.warn(msg)
================================================
FILE: pytest_monitor/sys_utils.py
================================================
import hashlib
import multiprocessing
import os
import platform
import socket
import subprocess
import sys
import warnings
import psutil
def collect_ci_info():
# Test for jenkins
if "BUILD_NUMBER" in os.environ and ("BRANCH_NAME" in os.environ or "JOB_NAME" in os.environ):
br = os.environ["BRANCH_NAME"] if "BRANCH_NAME" in os.environ else os.environ["JOB_NAME"]
return {
"pipeline_branch": br,
"pipeline_build_no": os.environ["BUILD_NUMBER"],
"__ci__": "jenkinsci",
}
# Test for CircleCI
if "CIRCLE_JOB" in os.environ and "CIRCLE_BUILD_NUM" in os.environ:
return {
"pipeline_branch": os.environ["CIRCLE_JOB"],
"pipeline_build_no": os.environ["CIRCLE_BUILD_NUM"],
"__ci__": "circleci",
}
# Test for TravisCI
if "TRAVIS_BUILD_NUMBER" in os.environ and "TRAVIS_BUILD_ID" in os.environ:
return {
"pipeline_branch": os.environ["TRAVIS_BUILD_ID"],
"pipeline_build_no": os.environ["TRAVIS_BUILD_NUMBER"],
"__ci__": "travisci",
}
# Test for DroneCI
if "DRONE_REPO_BRANCH" in os.environ and "DRONE_BUILD_NUMBER" in os.environ:
return {
"pipeline_branch": os.environ["DRONE_REPO_BRANCH"],
"pipeline_build_no": os.environ["DRONE_BUILD_NUMBER"],
"__ci__": "droneci",
}
# Test for Gitlab CI
if "CI_JOB_NAME" in os.environ and "CI_PIPELINE_ID" in os.environ:
return {
"pipeline_branch": os.environ["CI_JOB_NAME"],
"pipeline_build_no": os.environ["CI_PIPELINE_ID"],
"__ci__": "gitlabci",
}
# Test for Bitbucket CI
if "BITBUCKET_BRANCH" in os.environ and "BITBUCKET_BUILD_NUMBER" in os.environ:
return {
"pipeline_branch": os.environ["BITBUCKET_BRANCH"],
"pipeline_build_no": os.environ["BITBUCKET_BUILD_NUMBER"],
"__ci__": "bitbucketci",
}
return {}
def determine_scm_revision():
for scm, cmd in (("git", r"git rev-parse HEAD"), ("p4", r"p4 changes -m1 \#have")):
p = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
p_out, _ = p.communicate()
if p.returncode == 0:
scm_ref = p_out.decode(errors="ignore").split("\n", maxsplit=1)[0]
if scm == "p4":
scm_ref = scm_ref.split()[1]
return scm_ref
return ""
def _get_cpu_string():
if platform.system().lower() == "darwin":
old_path = os.environ["PATH"]
os.environ["PATH"] = old_path + ":" + "/usr/sbin"
ret = subprocess.check_output("sysctl -n machdep.cpu.brand_string", shell=True)
os.environ["PATH"] = old_path
return ret.decode().strip()
if platform.system().lower() == "linux":
with open("/proc/cpuinfo", "r", encoding="utf-8") as f:
lines = [i for i in f if i.startswith("model name")]
if lines:
return lines[0].split(":")[1].strip()
return platform.processor()
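# Note: the CPU brand string comes from sysctl on macOS (temporarily extending
# PATH with /usr/sbin, where sysctl lives), from /proc/cpuinfo on Linux, and
# from platform.processor() on any other system.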
class ExecutionContext:
def __init__(self):
self.__cpu_count = multiprocessing.cpu_count()
self.__cpu_vendor = _get_cpu_string()
if int(os.environ.get("PYTEST_MONITOR_FORCE_CPU_FREQ", "0")):
self._read_cpu_freq_from_env()
else:
try:
self.__cpu_freq_base = psutil.cpu_freq().current
except (AttributeError, NotImplementedError, FileNotFoundError):
warnings.warn("Unable to fetch CPU frequency. Trying to read it from environment..")
self._read_cpu_freq_from_env()
self.__proc_typ = platform.processor()
self.__tot_mem = int(psutil.virtual_memory().total / 1024**2)
self.__fqdn = socket.getfqdn()
self.__machine = platform.machine()
self.__arch = platform.architecture()[0]
self.__system = f"{platform.system()} - {platform.release()}"
self.__py_ver = sys.version
def _read_cpu_freq_from_env(self):
try:
self.__cpu_freq_base = float(os.environ.get("PYTEST_MONITOR_CPU_FREQ", "0."))
except (ValueError, TypeError):
warnings.warn("Wrong type/value while reading cpu frequency from environment. Forcing to 0.0.")
self.__cpu_freq_base = 0.0
def to_dict(self):
return {
"cpu_count": self.cpu_count,
"cpu_frequency": self.cpu_frequency,
"cpu_type": self.cpu_type,
"cpu_vendor": self.cpu_vendor,
"ram_total": self.ram_total,
"machine_node": self.fqdn,
"machine_type": self.machine,
"machine_arch": self.architecture,
"system_info": self.system_info,
"python_info": self.python_info,
"h": self.compute_hash(),
}
@property
def cpu_count(self):
return self.__cpu_count
@property
def cpu_frequency(self):
return self.__cpu_freq_base
@property
def cpu_type(self):
return self.__proc_typ
@property
def cpu_vendor(self):
return self.__cpu_vendor
@property
def ram_total(self):
return self.__tot_mem
@property
def fqdn(self):
return self.__fqdn
@property
def machine(self):
return self.__machine
@property
def architecture(self):
return self.__arch
@property
def system_info(self):
return self.__system
@property
def python_info(self):
return self.__py_ver
def compute_hash(self):
hr = hashlib.md5()
hr.update(str(self.__cpu_count).encode())
hr.update(str(self.__cpu_freq_base).encode())
hr.update(str(self.__proc_typ).encode())
hr.update(str(self.__tot_mem).encode())
hr.update(str(self.__fqdn).encode())
hr.update(str(self.__machine).encode())
hr.update(str(self.__arch).encode())
hr.update(str(self.__system).encode())
hr.update(str(self.__py_ver).encode())
return hr.hexdigest()
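# Note: compute_hash() folds every hardware and software field into a single
# MD5 digest, exposed as "h" by to_dict(). It acts as a stable identifier for
# an execution context, allowing identical environments to be deduplicated by
# the storage layer.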
================================================
FILE: requirements.dev.txt
================================================
psutil>=5.1.0
memory_profiler>=0.58
pytest
requests
black
isort
flake8=6.1.0
flake8-builtins=2.1.0
flake8-simplify=0.19.3
flake8-comprehensions=3.10.1
flake8-pytest-style=1.6.0
flake8-return=1.2.0
flake8-pyproject=1.2.3
pre-commit=3.3.3
================================================
FILE: requirements.txt
================================================
psutil>=5.1.0
memory_profiler>=0.58
pytest
requests
================================================
FILE: tests/conftest.py
================================================
pytest_plugins = ["pytester"]
================================================
FILE: tests/test_monitor.py
================================================
# -*- coding: utf-8 -*-
import json
import pathlib
import sqlite3
import pytest
def test_monitor_basic_test(testdir):
"""Make sure that pytest-monitor does the job without impacting user tests."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
def test_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("-vv", "--tag", "version=12.3.5")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert len(cursor.fetchall()) == 1
cursor = db.cursor()
tags = json.loads(cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0])
assert "description" not in tags
assert "version" in tags
assert tags["version"] == "12.3.5"
def test_monitor_basic_test_description(testdir):
"""Make sure that pytest-monitor does the job without impacting user tests."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
def test_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("-vv", "--description", '"Test"', "--tag", "version=12.3.5")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert len(cursor.fetchall()) == 1
cursor = db.cursor()
tags = json.loads(cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0])
assert "description" in tags
assert tags["description"] == '"Test"'
assert "version" in tags
assert tags["version"] == "12.3.5"
def test_monitor_pytest_skip_marker(testdir):
"""Make sure that pytest-monitor does the job without impacting user tests."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import time
@pytest.mark.skip("Some reason")
def test_skipped():
assert True
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_skipped SKIPPED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(skipped=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert not len(cursor.fetchall())
def test_monitor_pytest_skip_marker_on_fixture(testdir):
"""Make sure that pytest-monitor does the job without impacting user tests."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import time
@pytest.fixture
def a_fixture():
pytest.skip("because this is the scenario being tested")
def test_skipped(a_fixture):
assert True
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_skipped SKIPPED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(skipped=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert not len(cursor.fetchall())
def test_bad_markers(testdir):
"""Make sure that pytest-monitor warns about unknown markers."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import time
@pytest.mark.monitor_bad_marker
def test_ok():
time.sleep(0.1)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_ok PASSED*", "*Nothing known about marker monitor_bad_marker*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert len(cursor.fetchall()) == 1
def test_monitor_skip_module(testdir):
"""Make sure that pytest-monitor correctly understand the monitor_skip_test marker."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import time
pytestmark = pytest.mark.monitor_skip_test
def test_ok_not_monitored():
time.sleep(0.1)
x = ['a' * i for i in range(100)]
assert len(x) == 100
def test_another_function_ok_not_monitored():
assert True
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(
[
"*::test_ok_not_monitored PASSED*",
"*::test_another_function_ok_not_monitored PASSED*",
]
)
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=2)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert not len(cursor.fetchall()) # Nothing ran
def test_monitor_skip_test(testdir):
"""Make sure that pytest-monitor correctly understand the monitor_skip_test marker."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import time
@pytest.mark.monitor_skip_test
def test_not_monitored():
time.sleep(0.1)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_not_monitored PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert not len(cursor.fetchall()) # nothing monitored
def test_monitor_skip_test_if(testdir):
"""Make sure that pytest-monitor correctly understand the monitor_skip_test_if marker."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import time
@pytest.mark.monitor_skip_test_if(True)
def test_not_monitored():
time.sleep(0.1)
x = ['a' * i for i in range(100)]
assert len(x) == 100
@pytest.mark.monitor_skip_test_if(False)
def test_monitored():
time.sleep(0.1)
x = ['a' *i for i in range(100)]
assert len(x) == 100
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_not_monitored PASSED*", "*::test_monitored PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=2)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert len(cursor.fetchall()) == 1
def test_monitor_no_db(testdir):
"""Make sure that pytest-monitor correctly understand the monitor_skip_test_if marker."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import pytest
import time
def test_it():
time.sleep(0.1)
x = ['a' * i for i in range(100)]
assert len(x) == 100
def test_that():
time.sleep(0.1)
x = ['a' *i for i in range(100)]
assert len(x) == 100
"""
)
wrn = "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring."
with pytest.warns(UserWarning, match=wrn):
# run pytest with the following cmd args
result = testdir.runpytest("--no-db", "-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_it PASSED*", "*::test_that PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert not pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=2)
def test_monitor_basic_output(testdir):
"""Make sure that pytest-monitor does not repeat captured output (issue #26)."""
# create a temporary pytest test module
testdir.makepyfile(
"""
def test_it():
print('Hello World')
"""
)
wrn = "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring."
with pytest.warns(UserWarning, match=wrn):
# run pytest with the following cmd args
result = testdir.runpytest("--no-db", "-s", "-vv")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_it Hello World*"])
assert "Hello World" != result.stdout.get_lines_after("*Hello World")[0]
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
def test_monitor_with_doctest(testdir):
"""Make sure that pytest-monitor does not fail to run doctest."""
# create a temporary pytest test module
testdir.makepyfile(
'''
def run(a, b):
"""
>>> run(3, 30)
33
"""
return a + b
'''
)
# run pytest with the following cmd args
result = testdir.runpytest("--doctest-modules", "-vv")
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert not len(cursor.fetchall())
pymon_path.unlink()
result = testdir.runpytest("--doctest-modules", "--no-monitor", "-vv")
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
assert not pymon_path.exists()
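# Note: the assertions above pin down the SQLite schema written to `.pymon`:
# per-test measures land in TEST_METRICS (one row per monitored item), while
# session-level tags and CI metadata are serialized as JSON into
# TEST_SESSIONS.RUN_DESCRIPTION.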
================================================
FILE: tests/test_monitor_component.py
================================================
# -*- coding: utf-8 -*-
import pathlib
import sqlite3
def test_monitor_no_component(testdir):
"""Make sure that pytest-monitor has an empty component by default"""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
def test_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert len(cursor.fetchall()) == 1
cursor.execute("SELECT ITEM FROM TEST_METRICS WHERE COMPONENT != '' AND ITEM LIKE '%test_ok';")
assert not len(cursor.fetchall())
def test_monitor_force_component(testdir):
"""Make sure that pytest-monitor forces the component name if required"""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
def test_force_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("--force-component", "my_component", "-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_force_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert len(cursor.fetchall()) == 1
cursor.execute("SELECT ITEM FROM TEST_METRICS WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_force_ok%';")
assert len(cursor.fetchall()) == 1
def test_monitor_prefix_component(testdir):
"""Make sure that pytest-monitor has a prefixed component"""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
pytest_monitor_component = 'internal'
def test_prefix_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("--component-prefix", "my_component", "-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_prefix_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert len(cursor.fetchall()) == 1
cursor.execute("SELECT ITEM FROM TEST_METRICS WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';")
assert not len(cursor.fetchall())
cursor.execute("SELECT ITEM FROM TEST_METRICS WHERE COMPONENT == 'my_component.internal' AND ITEM LIKE '%test_prefix_ok%';")
assert len(cursor.fetchall()) == 1
def test_monitor_prefix_without_component(testdir):
"""Make sure that pytest-monitor has a prefixed component"""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
def test_prefix_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
# run pytest with the following cmd args
result = testdir.runpytest("--component-prefix", "my_component", "-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_prefix_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
assert len(cursor.fetchall()) == 1
cursor.execute("SELECT ITEM FROM TEST_METRICS WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';")
assert len(cursor.fetchall()) == 1
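# Note on component resolution, as exercised above: --force-component
# overrides everything; --component-prefix is prepended (dot-separated) to the
# module-level `pytest_monitor_component` string when one exists and is used
# alone otherwise; with neither option the component stays empty.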
================================================
FILE: tests/test_monitor_context.py
================================================
import os
import pathlib
import sqlite3
from unittest import mock
import pytest
CPU_FREQ_PATH = "pytest_monitor.sys_utils.psutil.cpu_freq"
TEST_CONTENT = """
import time
def test_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
def get_nb_metrics_with_cpu_freq(path):
db_path = path / ".pymon"
db = sqlite3.connect(db_path.as_posix())
cursor = db.cursor()
cursor.execute("SELECT ITEM FROM TEST_METRICS;")
nb_metrics = len(cursor.fetchall())
cursor = db.cursor()
cursor.execute("SELECT CPU_FREQUENCY_MHZ FROM EXECUTION_CONTEXTS;")
rows = cursor.fetchall()
assert len(rows) == 1
cpu_freq = rows[0][0]
return nb_metrics, cpu_freq
def test_force_cpu_freq_set_0_use_psutil(testdir):
"""Test that when force mode is set, we do not call psutil to fetch CPU's frequency"""
# create a temporary pytest test module
testdir.makepyfile(TEST_CONTENT)
with mock.patch(CPU_FREQ_PATH, return_value=1500) as cpu_freq_mock:
os.environ["PYTEST_MONITOR_FORCE_CPU_FREQ"] = "0"
os.environ["PYTEST_MONITOR_CPU_FREQ"] = "3000"
# run pytest with the following cmd args
result = testdir.runpytest("-vv")
del os.environ["PYTEST_MONITOR_FORCE_CPU_FREQ"]
del os.environ["PYTEST_MONITOR_CPU_FREQ"]
cpu_freq_mock.assert_called()
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir)))
assert (nb_metrics, cpu_freq) == (1, 3000)
def test_force_cpu_freq(testdir):
"""Test that when force mode is set, we do not call psutil to fetch CPU's frequency"""
# create a temporary pytest test module
testdir.makepyfile(TEST_CONTENT)
with mock.patch(CPU_FREQ_PATH, return_value=1500) as cpu_freq_mock:
os.environ["PYTEST_MONITOR_FORCE_CPU_FREQ"] = "1"
os.environ["PYTEST_MONITOR_CPU_FREQ"] = "3000"
# run pytest with the following cmd args
result = testdir.runpytest("-vv")
del os.environ["PYTEST_MONITOR_FORCE_CPU_FREQ"]
del os.environ["PYTEST_MONITOR_CPU_FREQ"]
cpu_freq_mock.assert_not_called()
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir)))
assert (nb_metrics, cpu_freq) == (1, 3000)
@pytest.mark.parametrize("effect", [AttributeError, NotImplementedError, FileNotFoundError])
def test_when_cpu_freq_cannot_fetch_frequency_set_freq_by_using_fallback(effect, testdir):
"""Make sure that pytest-monitor fallback takes value of CPU FREQ from special env var"""
# create a temporary pytest test module
testdir.makepyfile(TEST_CONTENT)
with mock.patch(CPU_FREQ_PATH, side_effect=effect) as cpu_freq_mock:
os.environ["PYTEST_MONITOR_CPU_FREQ"] = "3000"
# run pytest with the following cmd args
result = testdir.runpytest("-vv")
del os.environ["PYTEST_MONITOR_CPU_FREQ"]
cpu_freq_mock.assert_called()
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir)))
assert (nb_metrics, cpu_freq) == (1, 3000)
@pytest.mark.parametrize("effect", [AttributeError, NotImplementedError, FileNotFoundError])
def test_when_cpu_freq_cannot_fetch_frequency_set_freq_to_0(effect, testdir):
"""Make sure that pytest-monitor's fallback mechanism is efficient enough."""
# create a temporary pytest test module
testdir.makepyfile(TEST_CONTENT)
with mock.patch(CPU_FREQ_PATH, side_effect=effect) as cpu_freq_mock:
# run pytest with the following cmd args
result = testdir.runpytest("-vv")
cpu_freq_mock.assert_called()
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir)))
assert (nb_metrics, cpu_freq) == (1, 0)
@mock.patch("pytest_monitor.sys_utils.psutil.cpu_freq", return_value=None)
def test_when_cpu_freq_cannot_fetch_frequency(cpu_freq_mock, testdir):
"""Make sure that pytest-monitor does the job when we have issue in collecing context resources"""
# create a temporary pytest test module
testdir.makepyfile(TEST_CONTENT)
# run pytest with the following cmd args
result = testdir.runpytest("-vv")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
nb_metrics, cpu_freq = get_nb_metrics_with_cpu_freq(pathlib.Path(str(testdir)))
assert (nb_metrics, cpu_freq) == (1, 0)
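# Summary of the CPU frequency resolution order exercised above:
# PYTEST_MONITOR_FORCE_CPU_FREQ=1 reads PYTEST_MONITOR_CPU_FREQ directly;
# otherwise psutil.cpu_freq() is tried first and any failure (exception raised
# or None returned) falls back to PYTEST_MONITOR_CPU_FREQ, defaulting to 0.0.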
================================================
FILE: tests/test_monitor_in_ci.py
================================================
# -*- coding: utf-8 -*-
import os
import pathlib
import sqlite3
def test_monitor_no_ci(testdir):
"""Make sure that pytest-monitor does not insert CI information."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
def test_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
envs = {}
for k in [
"CIRCLE_BUILD_NUM",
"CIRCLE_JOB",
"DRONE_REPO_BRANCH",
"DRONE_BUILD_NUMBER",
"BUILD_NUMBER",
"JOB_NUMBER",
"JOB_NAME",
"TRAVIS_BUILD_ID",
"TRAVIS_BUILD_NUMBER",
"CI_PIPELINE_ID",
"CI_JOB_NAME",
"BITBUCKET_BRANCH",
"BITBUCKET_BUILD_NUMBER",
]:
if k in os.environ:
envs[k] = os.environ[k]
del os.environ[k]
# run pytest with the following cmd args
result = testdir.runpytest("-v")
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;")
desc = cursor.fetchall()
assert len(desc) == 1 # current test
assert desc[0][0] == "{}"
for k in envs.keys():
os.environ[k] = envs[k]
def test_monitor_jenkins_ci(testdir):
"""Make sure that pytest-monitor correctly handle Jenkins CI information."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
def test_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
def check_that(the_result, match):
# fnmatch_lines does an assertion internally
the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
the_result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;")
desc = cursor.fetchall()
assert len(desc) == 1 # current test
assert desc[0][0] == match
pymon_path.unlink()
run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "jenkinsci"}'
envs = {}
for k in [
"CIRCLE_BUILD_NUM",
"CIRCLE_JOB",
"DRONE_REPO_BRANCH",
"DRONE_BUILD_NUMBER",
"BUILD_NUMBER",
"JOB_NUMBER",
"JOB_NAME",
"TRAVIS_BUILD_ID",
"TRAVIS_BUILD_NUMBER",
"CI_PIPELINE_ID",
"CI_JOB_NAME",
"BITBUCKET_BRANCH",
"BITBUCKET_BUILD_NUMBER",
]:
if k in os.environ:
envs[k] = os.environ[k]
del os.environ[k]
for env, exp in [
({"BUILD_NUMBER": "123"}, "{}"),
({"BUILD_NUMBER": "123", "JOB_NAME": "test"}, run_description),
({"BUILD_NUMBER": "123", "BRANCH_NAME": "test"}, run_description),
(
{"BUILD_NUMBER": "123", "JOB_NAME": "test-123", "BRANCH_NAME": "test"},
run_description,
),
]:
if "BUILD_NUMBER" in os.environ:
del os.environ["BUILD_NUMBER"]
if "JOB_NUMBER" in os.environ:
del os.environ["JOB_NAME"]
if "BRANCH_NUMBER" in os.environ:
del os.environ["BRANCH_NAME"]
for k, v in env.items():
os.environ[k] = v
result = testdir.runpytest("-v")
check_that(result, match=exp)
if "BUILD_NUMBER" in os.environ:
del os.environ["BUILD_NUMBER"]
if "JOB_NUMBER" in os.environ:
del os.environ["JOB_NAME"]
if "BRANCH_NUMBER" in os.environ:
del os.environ["BRANCH_NAME"]
def test_monitor_gitlab_ci(testdir):
"""Make sure that pytest-monitor correctly handle Gitlab CI information."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
def test_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
def check_that(the_result, match):
# fnmatch_lines does an assertion internally
the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
the_result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;")
desc = cursor.fetchall()
assert len(desc) == 1 # current test
assert desc[0][0] == match
pymon_path.unlink()
run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "gitlabci"}'
envs = {}
for k in [
"CIRCLE_BUILD_NUM",
"CIRCLE_JOB",
"DRONE_REPO_BRANCH",
"DRONE_BUILD_NUMBER",
"BUILD_NUMBER",
"JOB_NUMBER",
"JOB_NAME",
"TRAVIS_BUILD_ID",
"TRAVIS_BUILD_NUMBER",
"CI_PIPELINE_ID",
"CI_JOB_NAME",
"BITBUCKET_BRANCH",
"BITBUCKET_BUILD_NUMBER",
]:
if k in os.environ:
envs[k] = os.environ[k]
del os.environ[k]
for env, exp in [
({"CI_PIPELINE_ID": "123"}, "{}"),
({"CI_PIPELINE_ID": "123", "CI_JOB_NAME": "test"}, run_description),
({"CI_JOB_NAME": "123"}, "{}"),
]:
if "CI_PIPELINE_ID" in os.environ:
del os.environ["CI_PIPELINE_ID"]
if "CI_JOB_NAME" in os.environ:
del os.environ["CI_JOB_NAME"]
for k, v in env.items():
os.environ[k] = v
result = testdir.runpytest("-v")
check_that(result, match=exp)
if "CI_PIPELINE_ID" in os.environ:
del os.environ["CI_PIPELINE_ID"]
if "CI_JOB_NAME" in os.environ:
del os.environ["CI_JOB_NAME"]
def test_monitor_travis_ci(testdir):
"""Make sure that pytest-monitor correctly handle Travis CI information."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
def test_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
def check_that(the_result, match):
# fnmatch_lines does an assertion internally
the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
the_result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;")
desc = cursor.fetchall()
assert len(desc) == 1 # current test
assert desc[0][0] == match
pymon_path.unlink()
run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "travisci"}'
envs = {}
for k in [
"CIRCLE_BUILD_NUM",
"CIRCLE_JOB",
"DRONE_REPO_BRANCH",
"DRONE_BUILD_NUMBER",
"BUILD_NUMBER",
"JOB_NUMBER",
"JOB_NAME",
"TRAVIS_BUILD_ID",
"TRAVIS_BUILD_NUMBER",
"CI_PIPELINE_ID",
"CI_JOB_NAME",
"BITBUCKET_BRANCH",
"BITBUCKET_BUILD_NUMBER",
]:
if k in os.environ:
envs[k] = os.environ[k]
del os.environ[k]
for env, exp in [
({"TRAVIS_BUILD_NUMBER": "123"}, "{}"),
({"TRAVIS_BUILD_NUMBER": "123", "TRAVIS_BUILD_ID": "test"}, run_description),
({"TRAVIS_BUILD_ID": "test-123"}, "{}"),
]:
if "TRAVIS_BUILD_NUMBER" in os.environ:
del os.environ["TRAVIS_BUILD_NUMBER"]
if "TRAVIS_BUILD_ID" in os.environ:
del os.environ["TRAVIS_BUILD_ID"]
for k, v in env.items():
os.environ[k] = v
result = testdir.runpytest("-v")
check_that(result, match=exp)
if "TRAVIS_BUILD_NUMBER" in os.environ:
del os.environ["TRAVIS_BUILD_NUMBER"]
if "TRAVIS_BUILD_ID" in os.environ:
del os.environ["TRAVIS_BUILD_ID"]
def test_monitor_circle_ci(testdir):
"""Make sure that pytest-monitor correctly handle Circle CI information."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
def test_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
def check_that(the_result, match):
# fnmatch_lines does an assertion internally
the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
the_result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;")
desc = cursor.fetchall()
assert len(desc) == 1 # current test
assert desc[0][0] == match
pymon_path.unlink()
run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "circleci"}'
envs = {}
for k in [
"CIRCLE_BUILD_NUM",
"CIRCLE_JOB",
"DRONE_REPO_BRANCH",
"DRONE_BUILD_NUMBER",
"BUILD_NUMBER",
"JOB_NUMBER",
"JOB_NAME",
"TRAVIS_BUILD_ID",
"TRAVIS_BUILD_NUMBER",
"CI_PIPELINE_ID",
"CI_JOB_NAME",
"BITBUCKET_BRANCH",
"BITBUCKET_BUILD_NUMBER",
]:
if k in os.environ:
envs[k] = os.environ[k]
del os.environ[k]
for env, exp in [
({"CIRCLE_BUILD_NUM": "123"}, "{}"),
({"CIRCLE_BUILD_NUM": "123", "CIRCLE_JOB": "test"}, run_description),
({"CIRCLE_JOB": "test"}, "{}"),
]:
if "CIRCLE_BUILD_NUM" in os.environ:
del os.environ["CIRCLE_BUILD_NUM"]
if "CIRCLE_JOB" in os.environ:
del os.environ["CIRCLE_JOB"]
for k, v in env.items():
os.environ[k] = v
result = testdir.runpytest("-v")
check_that(result, match=exp)
if "CIRCLE_BUILD_NUM" in os.environ:
del os.environ["CIRCLE_BUILD_NUM"]
if "CIRCLE_JOB" in os.environ:
del os.environ["CIRCLE_JOB"]
def test_monitor_drone_ci(testdir):
"""Make sure that pytest-monitor correctly handle Jenkins CI information."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
def test_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
def check_that(the_result, match):
# fnmatch_lines does an assertion internally
the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
the_result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;")
desc = cursor.fetchall()
assert len(desc) == 1 # current test
assert desc[0][0] == match
pymon_path.unlink()
run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "droneci"}'
envs = {}
for k in [
"CIRCLE_BUILD_NUM",
"CIRCLE_JOB",
"DRONE_REPO_BRANCH",
"DRONE_BUILD_NUMBER",
"BUILD_NUMBER",
"JOB_NUMBER",
"JOB_NAME",
"TRAVIS_BUILD_ID",
"TRAVIS_BUILD_NUMBER",
"CI_PIPELINE_ID",
"CI_JOB_NAME",
"BITBUCKET_BRANCH",
"BITBUCKET_BUILD_NUMBER",
]:
if k in os.environ:
envs[k] = os.environ[k]
del os.environ[k]
for env, exp in [
({"DRONE_BUILD_NUMBER": "123"}, "{}"),
({"DRONE_BUILD_NUMBER": "123", "DRONE_REPO_BRANCH": "test"}, run_description),
({"DRONE_REPO_BRANCH": "test"}, "{}"),
]:
if "DRONE_REPO_BRANCH" in os.environ:
del os.environ["DRONE_REPO_BRANCH"]
if "DRONE_BUILD_NUMBER" in os.environ:
del os.environ["DRONE_BUILD_NUMBER"]
for k, v in env.items():
os.environ[k] = v
result = testdir.runpytest("-v")
check_that(result, match=exp)
if "DRONE_REPO_BRANCH" in os.environ:
del os.environ["DRONE_REPO_BRANCH"]
if "DRONE_BUILD_NUMBER" in os.environ:
del os.environ["DRONE_BUILD_NUMBER"]
def test_monitor_bitbucket_ci(testdir):
"""Make sure that pytest-monitor correctly handle Bitbucket CI information."""
# create a temporary pytest test module
testdir.makepyfile(
"""
import time
def test_ok():
time.sleep(0.5)
x = ['a' * i for i in range(100)]
assert len(x) == 100
"""
)
def check_that(the_result, match):
# fnmatch_lines does an assertion internally
the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"])
pymon_path = pathlib.Path(str(testdir)) / ".pymon"
assert pymon_path.exists()
# make sure that we get a '0' exit code for the test suite
the_result.assert_outcomes(passed=1)
db = sqlite3.connect(str(pymon_path))
cursor = db.cursor()
cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;")
desc = cursor.fetchall()
assert len(desc) == 1 # current test
assert desc[0][0] == match
pymon_path.unlink()
run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "bitbucketci"}'
envs = {}
for k in [
"CIRCLE_BUILD_NUM",
"CIRCLE_JOB",
"DRONE_REPO_BRANCH",
"DRONE_BUILD_NUMBER",
"BUILD_NUMBER",
"JOB_NUMBER",
"JOB_NAME",
"TRAVIS_BUILD_ID",
"TRAVIS_BUILD_NUMBER",
"CI_PIPELINE_ID",
"CI_JOB_NAME",
"BITBUCKET_BRANCH",
"BITBUCKET_BUILD_NUMBER",
]:
if k in os.environ:
envs[k] = os.environ[k]
del os.environ[k]
for env, exp in [
({"BITBUCKET_BUILD_NUMBER": "123"}, "{}"),
({"BITBUCKET_BUILD_NUMBER": "123", "BITBUCKET_BRANCH": "test"}, run_description),
({"BITBUCKET_BRANCH": "test"}, "{}"),
]:
if "BITBUCKET_BRANCH" in os.environ:
del os.environ["BITBUCKET_BRANCH"]
if "BITBUCKET_BUILD_NUMBER" in os.environ:
del os.environ["BITBUCKET_BUILD_NUMBER"]
for k, v in env.items():
os.environ[k] = v
result = testdir.runpytest("-v")
check_that(result, match=exp)
if "BITBUCKET_BRANCH" in os.environ:
del os.environ["BITBUCKET_BRANCH"]
if "BITBUCKET_BUILD_NUMBER" in os.environ:
del os.environ["BITBUCKET_BUILD_NUMBER"]
================================================
FILE: tox.ini
================================================
# For more information about tox, see https://tox.readthedocs.io/en/latest/
[tox]
envlist = py38,py39,py310,py311,flake8
[testenv]
deps = pytest>=3.0
commands = pytest {posargs:tests}
[testenv:flake8]
skip_install = true
deps = flake8
commands = flake8 pytest_monitor tests