[
  {
    "path": ".coveragerc",
    "content": "[report]\ninclude =\n  tests/*\n  xmlrunner/*\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.md",
    "content": "<!--\nThank you for your contribution and taking the time to report the issue!\nNote: Please search to see if an issue already exists for the bug you encountered.\n-->\n\n---\nname: 🐞 Bug\nabout: File a bug/issue\ntitle: '[BUG] <title>'\nlabels: Bug, Needs Triage\nassignees: ''\n\n---\n\n\n### Current Behavior:\n<!-- A concise description of what you're experiencing. -->\n\n### Expected Behavior:\n<!-- A concise description of what you expected to happen. -->\n\n### Steps To Reproduce:\n<!--\nExample: steps to reproduce the behavior:\n1. In this environment...\n1. With this config...\n1. Run '...'\n1. See error...\n\nor a minimal example\n-->\n\n### Environment:\n<!--\nExample:\n- OS: Ubuntu 20.04\n- Python: 3.14.0\n- xmlrunner: 4.0.0\noutput of `pip list`\n-->\n\n### Anything else:\n<!--\nLinks? References? Anything that will give us more context about the issue that you are encountering!\n-->\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.md",
    "content": "<!--\nThank you for your contribution and taking the time to report the issue!\nNote: Please search to see if an issue already exists for the bug you encountered.\n-->\n\n---\nname: 🚀 Feature Request\nabout: File a feature request\ntitle: '[Feature] <title>'\nlabels: Feature, Needs Triage\nassignees: ''\n\n---\n\n\n### Current Behavior:\n<!--\nExplain why the current behavior has shortcomings.\n-->\n\n### Feature Behavior:\n<!--\nA detailed description of the feature.\nExplain the pros/cons of this approach.\nExplain the target audience for this feature,\ne.g. is it for a very niche use case? or will everybody use it?\n-->\n\n### Test Plan for the feature\n<!--\nHow to test the new feature and ensure it plays nice with other existing features.\nHow to verify that it works?\nIdeas for unit tests?\n-->\n\n### Alternative solutions:\n<!--\nExplain how you are currently achieving the results a different way.\nExplain the pros/cons of the alternative solutions.\n-->\n\n### Environment:\n<!--\nExample:\n- OS: Ubuntu 20.04\n- Python: 3.14.0\n- xmlrunner: 4.0.0\noutput of `pip list`\n-->\n\n### Anything else:\n<!--\nLinks? References?\nAnything that will give us more context about the feature request!\n-->\n"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "<!--\nThank you for submitting a PR and your contribution!\n\nQuick checklist\n\n- [ ] Include unit tests when applicable\n- [ ] Documentation and comments\n- [ ] Reference existing issues, e.g. `see issue #123`, `closes #123`\n- [ ] Reference existing pull requests, e.g. `supersedes PR #123`\n\nSee the [github docs](https://help.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) for more information.\n\n-->"
  },
  {
    "path": ".github/workflows/tests.yml",
    "content": "name: Tests\n\non:\n  push:\n    branches:\n      - master\n  pull_request:\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        name: [\n          \"py310\",\n          \"py311\",\n          \"py312\",\n          \"py313\",\n          \"py314\",\n        ]\n        include:\n          - name: py310\n            python-version: \"3.10\"\n            tox_env: py310\n          - name: py311\n            python-version: \"3.11\"\n            tox_env: py311\n          - name: py312\n            python-version: \"3.12\"\n            tox_env: py312\n          - name: py313\n            python-version: \"3.13\"\n            tox_env: py313\n          - name: py314\n            python-version: \"3.14\"\n            tox_env: py314\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v2\n      - name: Setup Python ${{ matrix.python-version }}\n        uses: actions/setup-python@v2\n        with:\n          python-version: ${{ matrix.python-version }}\n      - name: Before Install\n        run: |\n          python --version\n          uname -a\n          lsb_release -a\n      - name: Install\n        env:\n          TOXENV: ${{ matrix.tox_env }}\n        run: |\n          pip install tox-gh-actions codecov coveralls\n          pip --version\n          tox --version\n      - name: Script\n        run: |\n          tox run -v\n      - name: After Failure\n        if: ${{ failure() }}\n        run: |\n          more .tox/log/* | cat\n          more .tox/*/log/* | cat\n      - name: Upload to Codecov\n        run: |\n          tox -e uploadcodecov\n        env:\n          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}\n"
  },
  {
    "path": ".gitignore",
    "content": "# Python bytecode\n*.pyc\n\n# Build directory\nbuild/*\ndist/*\n\n# Egg info directory\n*.egg-info\n\n# tox + coverage\n.tox\n.coverage\nhtmlcov/\n\n# autogenerated\nxmlrunner/version.py\n"
  },
  {
    "path": ".landscape.yml",
    "content": "doc-warnings: true\ntest-warnings: false\nstrictness: veryhigh\nmax-line-length: 80\nautodetect: true\npython-targets:\n  - 2\n  - 3\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright (c) 2008-2013, Daniel Fernandes Martins\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met: \n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer. \n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution. \n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nThe views and conclusions contained in the software and documentation are those\nof the authors and should not be interpreted as representing official policies, \neither expressed or implied, of the FreeBSD Project."
  },
  {
    "path": "MANIFEST.in",
    "content": "include README.md\ninclude LICENSE"
  },
  {
    "path": "Makefile",
    "content": "\nbuild/tox/bin:\n\tpython3 -m venv build/tox\n\tbuild/tox/bin/pip install tox\n\nbuild/publish/bin:\n\tpython3 -m venv build/publish\n\tbuild/publish/bin/pip install build wheel twine\n\ncheckversion:\n\tgit log -1 --oneline | grep -q \"Bump version\" || (echo \"DID NOT DO VERSION BUMP\"; exit 1)\n\tgit show-ref --tags | grep -q $$(git log -1 --pretty=%H) || (echo \"DID NOT TAG VERSION\"; exit 1)\n\ndist: checkversion build/publish/bin\n\tbuild/publish/bin/python -m build\n\npublish: dist/ build/publish/bin\n\tbuild/publish/bin/twine upload dist/*\n\ntest: build/tox/bin\n\tbuild/tox/bin/tox\n\nclean:\n\trm -rf build/ dist/\n\n.PHONY: checkversion dist publish clean test\n"
  },
  {
    "path": "README.md",
    "content": "[![License](https://img.shields.io/pypi/l/unittest-xml-reporting.svg)](https://pypi.python.org/pypi/unittest-xml-reporting/)\n[![Latest Version](https://img.shields.io/pypi/v/unittest-xml-reporting.svg)](https://pypi.python.org/pypi/unittest-xml-reporting/)\n[![Development Status](https://img.shields.io/pypi/status/unittest-xml-reporting.svg)](https://pypi.python.org/pypi/unittest-xml-reporting/)\n[![Documentation Status](https://readthedocs.org/projects/unittest-xml-reporting/badge/?version=latest)](http://unittest-xml-reporting.readthedocs.io/en/latest/?badge=latest)\n\n[![codecov.io Coverage Status](https://codecov.io/github/xmlrunner/unittest-xml-reporting/coverage.svg?branch=master)](https://codecov.io/github/xmlrunner/unittest-xml-reporting?branch=master)\n[![Coveralls Coverage Status](https://coveralls.io/repos/xmlrunner/unittest-xml-reporting/badge.svg?branch=master&service=github)](https://coveralls.io/github/xmlrunner/unittest-xml-reporting?branch=master)\n\n\n# unittest-xml-reporting (aka xmlrunner)\n\nA unittest test runner that can save test results to XML files in xUnit format.\nThe files can be consumed by a wide range of tools, such as build systems, IDEs\nand continuous integration servers.\n\n\n## PyPI\n\n- PyPI page for the project: https://pypi.org/project/unittest-xml-reporting/\n- PyPI download stats: https://pypistats.org/packages/unittest-xml-reporting\n\n\n## Contributing\n\nWe are always looking for good contributions, so please just fork the\nrepository and send pull requests (with tests please!).\n\nIf you would like write access to the repository, or become a maintainer,\nfeel free to get in touch.\n\n\n## Requirements\n\nSee [Status of Python versions](https://devguide.python.org/versions/) for python EOL information.\n\n* Python 3.10+\n* Last version supporting Python 3.7 - 3.9 was 3.2.0\n* Please note Python 3.6 end-of-life was in Dec 2021, last version supporting 3.6 was 3.1.0\n* Please note Python 3.5 end-of-life 
 was in Sep 2020, last version supporting 3.5 was 3.1.0\n* Please note Python 2.7 end-of-life was in Jan 2020, last version supporting 2.7 was 2.5.2\n* Please note Python 3.4 end-of-life was in Mar 2019, last version supporting 3.4 was 2.5.2\n* Please note Python 2.6 end-of-life was in Oct 2013, last version supporting 2.6 was 1.14.0\n\n\n## Limited support for `unittest.TestCase.subTest`\n\nhttps://docs.python.org/3/library/unittest.html#unittest.TestCase.subTest\n\n`unittest` has the concept of sub-tests for a `unittest.TestCase`; this doesn't map well to an existing xUnit concept, so you won't find it in the schema. What that means, is that you lose some granularity\nin the reports for sub-tests.\n\n`unittest` also does not report successful sub-tests, so the accounting won't be exact.\n\n## Jenkins plugins\n\n- Jenkins JUnit plugin : https://plugins.jenkins.io/junit/\n- Jenkins xUnit plugin : https://plugins.jenkins.io/xunit/\n\n### Jenkins JUnit plugin\n\nThis plugin does not perform XSD validation (at time of writing) and should parse the XML file without issues.\n\n### Jenkins xUnit plugin version 1.100\n\n- [Jenkins (junit-10.xsd), xunit plugin (2014-2018)](https://github.com/jenkinsci/xunit-plugin/blob/14c6e39c38408b9ed6280361484a13c6f5becca7/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd), version `1.100`.\n\nThis plugin does perform XSD validation and uses the more lax XSD. 
This should parse the XML file without issues.\n\n### Jenkins xUnit plugin version 1.104+\n\n- [Jenkins (junit-10.xsd), xunit plugin (2018-current)](https://github.com/jenkinsci/xunit-plugin/blob/ae25da5089d4f94ac6c4669bf736e4d416cc4665/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd), version `1.104`+.\n\nThis plugin does perform XSD validation and uses the more strict XSD.\n\nSee https://github.com/xmlrunner/unittest-xml-reporting/issues/209\n\n```\nimport io\nimport unittest\nimport xmlrunner\n\n# run the tests storing results in memory\nout = io.BytesIO()\nunittest.main(\n    testRunner=xmlrunner.XMLTestRunner(output=out),\n    failfast=False, buffer=False, catchbreak=False, exit=False)\n```\n\nTransform the results removing extra attributes.\n```\nfrom xmlrunner.extra.xunit_plugin import transform\n\nwith open('TEST-report.xml', 'wb') as report:\n    report.write(transform(out.getvalue()))\n\n```\n\n## JUnit Schema ?\n\nThere are many tools claiming to write JUnit reports, so you will find many schemas with minor differences.\n\nWe used the XSD that was available in the Jenkins xUnit plugin version `1.100`; a copy is available under `tests/vendor/jenkins/xunit-plugin/.../junit-10.xsd` (see attached license).\n\nYou may also find these resources useful:\n\n- https://stackoverflow.com/questions/4922867/what-is-the-junit-xml-format-specification-that-hudson-supports\n- https://stackoverflow.com/questions/11241781/python-unittests-in-jenkins\n- [JUnit-Schema (JUnit.xsd)](https://github.com/windyroad/JUnit-Schema/blob/master/JUnit.xsd)\n- [Windyroad (JUnit.xsd)](http://windyroad.com.au/dl/Open%20Source/JUnit.xsd)\n- [a gist (Jenkins xUnit test result schema)](https://gist.github.com/erikd/4192748)\n\n\n## Installation\n\nThe easiest way to install unittest-xml-reporting is via\n[Pip](http://www.pip-installer.org):\n\n````bash\n$ pip install unittest-xml-reporting\n````\n\nIf you use Git and want to get the latest *development* 
version:\n\n````bash\n$ git clone https://github.com/xmlrunner/unittest-xml-reporting.git\n$ cd unittest-xml-reporting\n$ sudo python -m pip install .\n````\n\nOr get the latest *development* version as a tarball:\n\n````bash\n$ wget https://github.com/xmlrunner/unittest-xml-reporting/archive/master.zip\n$ unzip master.zip\n$ cd unittest-xml-reporting\n$ sudo python -m pip install .\n````\n\nOr you can manually download the latest released version from\n[PyPI](https://pypi.python.org/pypi/unittest-xml-reporting/).\n\n\n## Command-line\n\n````bash\npython -m xmlrunner [options]\npython -m xmlrunner discover [options]\n\n# help\npython -m xmlrunner -h\n````\n\ne.g. \n````bash\npython -m xmlrunner discover -t ~/mycode/tests -o /tmp/build/junit-reports\n````\n\n## Usage\n\nThe script below, adapted from the\n[unittest](http://docs.python.org/library/unittest.html), shows how to use\n`XMLTestRunner` in a very simple way. In fact, the only difference between\nthis script and the original one is the last line:\n\n````python\nimport random\nimport unittest\nimport xmlrunner\n\nclass TestSequenceFunctions(unittest.TestCase):\n\n    def setUp(self):\n        self.seq = list(range(10))\n\n    @unittest.skip(\"demonstrating skipping\")\n    def test_skipped(self):\n        self.fail(\"shouldn't happen\")\n\n    def test_shuffle(self):\n        # make sure the shuffled sequence does not lose any elements\n        random.shuffle(self.seq)\n        self.seq.sort()\n        self.assertEqual(self.seq, list(range(10)))\n\n        # should raise an exception for an immutable sequence\n        self.assertRaises(TypeError, random.shuffle, (1,2,3))\n\n    def test_choice(self):\n        element = random.choice(self.seq)\n        self.assertTrue(element in self.seq)\n\n    def test_sample(self):\n        with self.assertRaises(ValueError):\n            random.sample(self.seq, 20)\n        for element in random.sample(self.seq, 5):\n            self.assertTrue(element in self.seq)\n\nif 
__name__ == '__main__':\n    unittest.main(\n        testRunner=xmlrunner.XMLTestRunner(output='test-reports'),\n        # these make sure that some options that are not applicable\n        # remain hidden from the help menu.\n        failfast=False, buffer=False, catchbreak=False)\n````\n\n### Reporting to a single file\n\n````python\nif __name__ == '__main__':\n    with open('/path/to/results.xml', 'wb') as output:\n        unittest.main(\n            testRunner=xmlrunner.XMLTestRunner(output=output),\n            failfast=False, buffer=False, catchbreak=False)\n````\n\n### Doctest support\n\nThe XMLTestRunner can also be used to report on docstrings style tests.\n\n````python\nimport doctest\nimport xmlrunner\n\ndef twice(n):\n    \"\"\"\n    >>> twice(5)\n    10\n    \"\"\"\n    return 2 * n\n\nclass Multiplicator(object):\n    def threetimes(self, n):\n        \"\"\"\n        >>> Multiplicator().threetimes(5)\n        15\n        \"\"\"\n        return 3 * n\n\nif __name__ == \"__main__\":\n    suite = doctest.DocTestSuite()\n    xmlrunner.XMLTestRunner().run(suite)\n````\n\n### Django support\n\nIn order to plug `XMLTestRunner` to a Django project, add the following\nto your `settings.py`:\n\n````python\nTEST_RUNNER = 'xmlrunner.extra.djangotestrunner.XMLTestRunner'\n````\n\nAlso, the following settings are provided so you can fine tune the reports:\n\n|setting|default|values|description|\n|-|-|-|-|\n|`TEST_OUTPUT_VERBOSE`|`1`|`0\\|1\\|2`|Besides the XML reports generated by the test runner, a bunch of useful information is printed to the `sys.stderr` stream, just like the `TextTestRunner` does. 
Use this setting to choose between a verbose and a non-verbose output.|\n|`TEST_OUTPUT_DESCRIPTIONS`|`False`|`True\\|False`|If your test methods contain docstrings, you can display such docstrings instead of displaying the test name (ex: `module.TestCase.test_method`).<br>In order to use this feature, you have to enable verbose output by setting `TEST_OUTPUT_VERBOSE = 2`.<br>Only affects stdout and not XML output.|\n|`TEST_OUTPUT_DIR`|`\".\"`|`<str>`|Tells the test runner where to put the XML reports. If the directory couldn't be found, the test runner will try to create it before generating the XML files.|\n|`TEST_OUTPUT_FILE_NAME`|`None`|`<str>`|Tells the test runner to output a single XML report with this filename under `os.path.join(TEST_OUTPUT_DIR, TEST_OUTPUT_FILE_NAME)`.<br>Please note that for long running tests, this will keep the results in memory for a longer time than multiple reports, and may use up more resources.|\n\n\n### Testing changes with `tox`\n\nPlease use `tox` to test your changes before sending a pull request.\nYou can find more information about `tox` at <https://testrun.org/tox/latest/>.\n\n```bash\n$ pip install tox\n\n# basic sanity test, friendly output\n$ tox -e pytest\n\n# all combinations\n$ tox\n```\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD   = sphinx-build\nPAPER         =\nBUILDDIR      = _build\n\n# Internal variables.\nPAPEROPT_a4     = -D latex_paper_size=a4\nPAPEROPT_letter = -D latex_paper_size=letter\nALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .\n# the i18n builder cannot share the environment and doctrees with the others\nI18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .\n\n.PHONY: help\nhelp:\n\t@echo \"Please use \\`make <target>' where <target> is one of\"\n\t@echo \"  html       to make standalone HTML files\"\n\t@echo \"  dirhtml    to make HTML files named index.html in directories\"\n\t@echo \"  singlehtml to make a single large HTML file\"\n\t@echo \"  pickle     to make pickle files\"\n\t@echo \"  json       to make JSON files\"\n\t@echo \"  htmlhelp   to make HTML files and a HTML help project\"\n\t@echo \"  qthelp     to make HTML files and a qthelp project\"\n\t@echo \"  applehelp  to make an Apple Help Book\"\n\t@echo \"  devhelp    to make HTML files and a Devhelp project\"\n\t@echo \"  epub       to make an epub\"\n\t@echo \"  epub3      to make an epub3\"\n\t@echo \"  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter\"\n\t@echo \"  latexpdf   to make LaTeX files and run them through pdflatex\"\n\t@echo \"  latexpdfja to make LaTeX files and run them through platex/dvipdfmx\"\n\t@echo \"  text       to make text files\"\n\t@echo \"  man        to make manual pages\"\n\t@echo \"  texinfo    to make Texinfo files\"\n\t@echo \"  info       to make Texinfo files and run them through makeinfo\"\n\t@echo \"  gettext    to make PO message catalogs\"\n\t@echo \"  changes    to make an overview of all changed/added/deprecated items\"\n\t@echo \"  xml        to make Docutils-native XML files\"\n\t@echo \"  pseudoxml  to make pseudoxml-XML files for display purposes\"\n\t@echo \"  linkcheck  
to check all external links for integrity\"\n\t@echo \"  doctest    to run all doctests embedded in the documentation (if enabled)\"\n\t@echo \"  coverage   to run coverage check of the documentation (if enabled)\"\n\t@echo \"  dummy      to check syntax errors of document sources\"\n\n.PHONY: clean\nclean:\n\trm -rf $(BUILDDIR)/*\n\n.PHONY: html\nhtml:\n\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/html.\"\n\n.PHONY: dirhtml\ndirhtml:\n\t$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\n.PHONY: singlehtml\nsinglehtml:\n\t$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml\n\t@echo\n\t@echo \"Build finished. The HTML page is in $(BUILDDIR)/singlehtml.\"\n\n.PHONY: pickle\npickle:\n\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle\n\t@echo\n\t@echo \"Build finished; now you can process the pickle files.\"\n\n.PHONY: json\njson:\n\t$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json\n\t@echo\n\t@echo \"Build finished; now you can process the JSON files.\"\n\n.PHONY: htmlhelp\nhtmlhelp:\n\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp\n\t@echo\n\t@echo \"Build finished; now you can run HTML Help Workshop with the\" \\\n\t      \".hhp project file in $(BUILDDIR)/htmlhelp.\"\n\n.PHONY: qthelp\nqthelp:\n\t$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp\n\t@echo\n\t@echo \"Build finished; now you can run \"qcollectiongenerator\" with the\" \\\n\t      \".qhcp project file in $(BUILDDIR)/qthelp, like this:\"\n\t@echo \"# qcollectiongenerator $(BUILDDIR)/qthelp/unittest-xml-reporting.qhcp\"\n\t@echo \"To view the help file:\"\n\t@echo \"# assistant -collectionFile $(BUILDDIR)/qthelp/unittest-xml-reporting.qhc\"\n\n.PHONY: applehelp\napplehelp:\n\t$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp\n\t@echo\n\t@echo 
\"Build finished. The help book is in $(BUILDDIR)/applehelp.\"\n\t@echo \"N.B. You won't be able to view it unless you put it in\" \\\n\t      \"~/Library/Documentation/Help or install it in your application\" \\\n\t      \"bundle.\"\n\n.PHONY: devhelp\ndevhelp:\n\t$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp\n\t@echo\n\t@echo \"Build finished.\"\n\t@echo \"To view the help file:\"\n\t@echo \"# mkdir -p $$HOME/.local/share/devhelp/unittest-xml-reporting\"\n\t@echo \"# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/unittest-xml-reporting\"\n\t@echo \"# devhelp\"\n\n.PHONY: epub\nepub:\n\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub\n\t@echo\n\t@echo \"Build finished. The epub file is in $(BUILDDIR)/epub.\"\n\n.PHONY: epub3\nepub3:\n\t$(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3\n\t@echo\n\t@echo \"Build finished. The epub3 file is in $(BUILDDIR)/epub3.\"\n\n.PHONY: latex\nlatex:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo\n\t@echo \"Build finished; the LaTeX files are in $(BUILDDIR)/latex.\"\n\t@echo \"Run \\`make' in that directory to run these through (pdf)latex\" \\\n\t      \"(use \\`make latexpdf' here to do that automatically).\"\n\n.PHONY: latexpdf\nlatexpdf:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through pdflatex...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\n.PHONY: latexpdfja\nlatexpdfja:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through platex and dvipdfmx...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\n.PHONY: text\ntext:\n\t$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text\n\t@echo\n\t@echo \"Build finished. 
The text files are in $(BUILDDIR)/text.\"\n\n.PHONY: man\nman:\n\t$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man\n\t@echo\n\t@echo \"Build finished. The manual pages are in $(BUILDDIR)/man.\"\n\n.PHONY: texinfo\ntexinfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo\n\t@echo \"Build finished. The Texinfo files are in $(BUILDDIR)/texinfo.\"\n\t@echo \"Run \\`make' in that directory to run these through makeinfo\" \\\n\t      \"(use \\`make info' here to do that automatically).\"\n\n.PHONY: info\ninfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo \"Running Texinfo files through makeinfo...\"\n\tmake -C $(BUILDDIR)/texinfo info\n\t@echo \"makeinfo finished; the Info files are in $(BUILDDIR)/texinfo.\"\n\n.PHONY: gettext\ngettext:\n\t$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale\n\t@echo\n\t@echo \"Build finished. The message catalogs are in $(BUILDDIR)/locale.\"\n\n.PHONY: changes\nchanges:\n\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes\n\t@echo\n\t@echo \"The overview file is in $(BUILDDIR)/changes.\"\n\n.PHONY: linkcheck\nlinkcheck:\n\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck\n\t@echo\n\t@echo \"Link check complete; look for any errors in the above output \" \\\n\t      \"or in $(BUILDDIR)/linkcheck/output.txt.\"\n\n.PHONY: doctest\ndoctest:\n\t$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest\n\t@echo \"Testing of doctests in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/doctest/output.txt.\"\n\n.PHONY: coverage\ncoverage:\n\t$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage\n\t@echo \"Testing of coverage in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/coverage/python.txt.\"\n\n.PHONY: xml\nxml:\n\t$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml\n\t@echo\n\t@echo \"Build finished. 
The XML files are in $(BUILDDIR)/xml.\"\n\n.PHONY: pseudoxml\npseudoxml:\n\t$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml\n\t@echo\n\t@echo \"Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml.\"\n\n.PHONY: dummy\ndummy:\n\t$(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy\n\t@echo\n\t@echo \"Build finished. Dummy builder generates no files.\"\n"
  },
  {
    "path": "docs/conf.py",
    "content": "# -*- coding: utf-8 -*-\n#\n# unittest-xml-reporting documentation build configuration file, created by\n# sphinx-quickstart on Mon May 30 11:39:40 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n    'sphinx.ext.autodoc',\n    'sphinx.ext.doctest',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = ['.rst', '.md']\n\n# The encoding of source files.\n#\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'unittest-xml-reporting'\ncopyright = u'2016, Daniel Fernandes Martins, Damien Nozay'\nauthor = u'Daniel Fernandes Martins, Damien Nozay'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u'2.1.0'\n# The full version, including alpha/beta/rc tags.\nrelease = u'2.1.0'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#\n# today = ''\n#\n# Else, today_fmt is used as the format for a strftime call.\n#\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n#\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#\n# html_title = u'unittest-xml-reporting v2.1.0'\n\n# A shorter title for the navigation bar.  Default is the same as html_title.\n#\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#\n# html_logo = None\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#\n# html_extra_path = []\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#\n# html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n#\n# html_domain_indices = True\n\n# If false, no index is generated.\n#\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it.  The value of this option must be the\n# base URL from which the finished HTML is served.\n#\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'\n#\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'unittest-xml-reportingdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n     # The paper size ('letterpaper' or 'a4paper').\n     #\n     # 'papersize': 'letterpaper',\n\n     # The font size ('10pt', '11pt' or '12pt').\n     #\n     # 'pointsize': '10pt',\n\n     # Additional stuff for the LaTeX preamble.\n     #\n     # 'preamble': '',\n\n     # Latex figure (float) alignment\n     #\n     # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n#  author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n    (master_doc, 'unittest-xml-reporting.tex', u'unittest-xml-reporting Documentation',\n     u'Daniel Fernandes Martins, Damien Nozay', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n#\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#\n# latex_appendices = []\n\n# If false, no module index is generated.\n#\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n    (master_doc, 'unittest-xml-reporting', u'unittest-xml-reporting Documentation',\n     [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n#  dir menu entry, description, category)\ntexinfo_documents = [\n    (master_doc, 'unittest-xml-reporting', u'unittest-xml-reporting Documentation',\n     author, 'unittest-xml-reporting', 'One line description of project.',\n     'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n#\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#\n# texinfo_no_detailmenu = False\n"
  },
  {
    "path": "docs/index.rst",
    "content": "unittest-xml-reporting\n======================\n\n``unittest-xml-reporting`` is a ``unittest`` test runner that can save\ntest results to XML files (jUnit) and be consumed by a wide range of\ntools such as continuous integration systems.\n\n\nGetting started\n===============\n\nSimilar to the ``unittest`` module, you can run::\n\n    python -m xmlrunner test_module\n    python -m xmlrunner module.TestClass\n    python -m xmlrunner module.Class.test_method\n\nas well as::\n\n    python -m xmlrunner discover [options]\n\nYou can also add a top level file to allow running the tests with\nthe command ``python tests.py``, and configure the test runner\nto output the XML reports in the ``test-reports`` directory. ::\n\n    # tests.py\n\n    if __name__ == '__main__':\n        unittest.main(\n            testRunner=xmlrunner.XMLTestRunner(output='test-reports'),\n            # these make sure that some options that are not applicable\n            # remain hidden from the help menu.\n            failfast=False, buffer=False, catchbreak=False)\n\n\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nbuild-backend = \"setuptools.build_meta\"\nrequires = [\n    \"setuptools>=77\",\n    \"setuptools-scm[toml]>=6.2.3\",\n]\n\n\n[project]\nname = \"unittest-xml-reporting\"\nlicense = \"BSD-2-Clause\"\nlicense-files = [ \"LICENSE\" ]\ndynamic = [\n    \"version\",\n]\ndescription = \"unittest-based test runner with Ant/JUnit like XML reporting.\"\nkeywords = [\n    \"pyunit\",\n    \"unittest\",\n    \"junit xml\",\n    \"xunit\",\n    \"report\",\n    \"testrunner\",\n    \"xmlrunner\",\n]\nreadme = \"README.md\"\nauthors = [\n    {name = \"Daniel Fernandes Martins\"},\n    {name = \"Damien Nozay\"},\n]\nclassifiers = [\n    \"Development Status :: 5 - Production/Stable\",\n    \"Intended Audience :: Developers\",\n    \"Natural Language :: English\",\n    \"Operating System :: OS Independent\",\n    \"Programming Language :: Python :: 3 :: Only\",\n    \"Programming Language :: Python :: 3.10\",\n    \"Programming Language :: Python :: 3.11\",\n    \"Programming Language :: Python :: 3.12\",\n    \"Programming Language :: Python :: 3.13\",\n    \"Programming Language :: Python :: 3.14\",\n    \"Programming Language :: Python :: Implementation :: CPython\",\n    \"Programming Language :: Python :: Implementation :: PyPy\",\n    \"Topic :: Software Development :: Libraries :: Python Modules\",\n    \"Topic :: Software Development :: Testing\",\n]\nrequires-python = \">=3.10\"\ndependencies = [\n    \"lxml\",\n]\n\n[project.urls]\nHomepage = \"http://github.com/xmlrunner/unittest-xml-reporting/tree/master/\"\n\n\n[tool.setuptools_scm]\nwrite_to = \"xmlrunner/version.py\"\n\n[tool.distutils.bdist_wheel]\nuniversal = 1\npython-tag = \"py3\"\n"
  },
  {
    "path": "tests/__init__.py",
    "content": ""
  },
  {
    "path": "tests/builder_test.py",
    "content": "# -*- coding: utf-8\n\nfrom xmlrunner.unittest import unittest\n\nimport xml.etree.ElementTree as ET\nfrom xml.dom.minidom import Document\n\nfrom xmlrunner import builder\n\n\nclass TestXMLContextTest(unittest.TestCase):\n    \"\"\"TestXMLContext test cases.\n    \"\"\"\n\n    def setUp(self):\n        self.doc = Document()\n        self.root = builder.TestXMLContext(self.doc)\n\n    def test_current_element_tag_name(self):\n        self.root.begin('tag', 'context-name')\n        self.assertEqual(self.root.element_tag(), 'tag')\n\n    def test_current_context_name(self):\n        self.root.begin('tag', 'context-name')\n        name = self.root.element.getAttribute('name')\n        self.assertEqual(name, 'context-name')\n\n    def test_current_context_invalid_unicode_name(self):\n        self.root.begin('tag', u'context-name\\x01\\x0B')\n        name = self.root.element.getAttribute('name')\n        self.assertEqual(name, u'context-name\\uFFFD\\uFFFD')\n\n    def test_increment_valid_testsuites_counters(self):\n        self.root.begin('testsuites', 'name')\n\n        for c in ('tests', 'failures', 'errors', 'skipped'):\n            self.root.increment_counter(c)\n\n        element = self.root.end()\n\n        with self.assertRaises(KeyError):\n            element.attributes['skipped']\n\n        for c in ('tests', 'failures', 'errors'):\n            value = element.attributes[c].value\n            self.assertEqual(value, '1')\n\n    def test_increment_valid_testsuite_counters(self):\n        self.root.begin('testsuite', 'name')\n\n        for c in ('tests', 'failures', 'errors', 'skipped'):\n            self.root.increment_counter(c)\n\n        element = self.root.end()\n\n        for c in ('tests', 'failures', 'errors', 'skipped'):\n            value = element.attributes[c].value\n            self.assertEqual(value, '1')\n\n    def test_increment_counters_for_unknown_context(self):\n        self.root.begin('unknown', 'name')\n\n        for c in 
('tests', 'failures', 'errors', 'skipped', 'invalid'):\n            self.root.increment_counter(c)\n\n        element = self.root.end()\n\n        for c in ('tests', 'failures', 'errors', 'skipped', 'invalid'):\n            with self.assertRaises(KeyError):\n                element.attributes[c]\n\n    def test_empty_counters_on_end_context(self):\n        self.root.begin('testsuite', 'name')\n        element = self.root.end()\n\n        for c in ('tests', 'failures', 'errors', 'skipped'):\n            self.assertEqual(element.attributes[c].value, '0')\n\n    def test_add_time_attribute_on_end_context(self):\n        self.root.begin('testsuite', 'name')\n        element = self.root.end()\n\n        element.attributes['time'].value\n\n    def test_add_timestamp_attribute_on_end_context(self):\n        self.root.begin('testsuite', 'name')\n        element = self.root.end()\n\n        element.attributes['timestamp'].value\n\n\nclass TestXMLBuilderTest(unittest.TestCase):\n    \"\"\"TestXMLBuilder test cases.\n    \"\"\"\n\n    def setUp(self):\n        self.builder = builder.TestXMLBuilder()\n        self.doc = self.builder._xml_doc\n        self.builder.begin_context('testsuites', 'name')\n\n        self.valid_chars = u'выбор'\n\n        self.invalid_chars = '\\x01'\n        self.invalid_chars_replace = u'\\ufffd'\n\n    def test_root_has_no_parent(self):\n        self.assertIsNone(self.builder.current_context().parent)\n\n    def test_current_context_tag(self):\n        self.assertEqual(self.builder.context_tag(), 'testsuites')\n\n    def test_begin_nested_context(self):\n        root = self.builder.current_context()\n\n        self.builder.begin_context('testsuite', 'name')\n\n        self.assertEqual(self.builder.context_tag(), 'testsuite')\n        self.assertIs(self.builder.current_context().parent, root)\n\n    def test_end_inexistent_context(self):\n        self.builder = builder.TestXMLBuilder()\n\n        self.assertFalse(self.builder.end_context())\n        
self.assertEqual(len(self.doc.childNodes), 0)\n\n    def test_end_root_context(self):\n        root = self.builder.current_context()\n\n        self.assertTrue(self.builder.end_context())\n        self.assertIsNone(self.builder.current_context())\n\n        # No contexts left\n        self.assertFalse(self.builder.end_context())\n\n        doc_children = self.doc.childNodes\n\n        self.assertEqual(len(doc_children), 1)\n        self.assertEqual(len(doc_children[0].childNodes), 0)\n        self.assertEqual(doc_children[0].tagName, root.element_tag())\n\n    def test_end_nested_context(self):\n        self.builder.begin_context('testsuite', 'name')\n        self.builder.current_context()\n\n        self.assertTrue(self.builder.end_context())\n\n        # Only updates the document when all contexts end\n        self.assertEqual(len(self.doc.childNodes), 0)\n\n    def test_end_all_context_stack(self):\n        root = self.builder.current_context()\n\n        self.builder.begin_context('testsuite', 'name')\n        nested = self.builder.current_context()\n\n        self.assertTrue(self.builder.end_context())\n        self.assertTrue(self.builder.end_context())\n\n        # No contexts left\n        self.assertFalse(self.builder.end_context())\n\n        root_child = self.doc.childNodes\n\n        self.assertEqual(len(root_child), 1)\n        self.assertEqual(root_child[0].tagName, root.element_tag())\n\n        nested_child = root_child[0].childNodes\n\n        self.assertEqual(len(nested_child), 1)\n        self.assertEqual(nested_child[0].tagName, nested.element_tag())\n\n    def test_append_valid_unicode_cdata_section(self):\n        self.builder.append_cdata_section('tag', self.valid_chars)\n        self.builder.end_context()\n\n        root_child = self.doc.childNodes[0]\n\n        cdata_container = root_child.childNodes[0]\n        self.assertEqual(cdata_container.tagName, 'tag')\n\n        cdata = cdata_container.childNodes[0]\n        
self.assertEqual(cdata.data, self.valid_chars)\n\n    def test_append_invalid_unicode_cdata_section(self):\n        self.builder.append_cdata_section('tag', self.invalid_chars)\n        self.builder.end_context()\n\n        root_child = self.doc.childNodes[0]\n        cdata_container = root_child.childNodes[0]\n\n        cdata = cdata_container.childNodes[0]\n        self.assertEqual(cdata.data, self.invalid_chars_replace)\n\n    def test_append_cdata_closing_tags_into_cdata_section(self):\n        self.builder.append_cdata_section('tag', ']]>')\n        self.builder.end_context()\n        root_child = self.doc.childNodes[0]\n        cdata_container = root_child.childNodes[0]\n        self.assertEqual(len(cdata_container.childNodes), 2)\n        self.assertEqual(cdata_container.childNodes[0].data, ']]')\n        self.assertEqual(cdata_container.childNodes[1].data, '>')\n\n    def test_append_tag_with_valid_unicode_values(self):\n        self.builder.append('tag', self.valid_chars, attr=self.valid_chars)\n        self.builder.end_context()\n\n        root_child = self.doc.childNodes[0]\n        tag = root_child.childNodes[0]\n\n        self.assertEqual(tag.tagName, 'tag')\n        self.assertEqual(tag.getAttribute('attr'), self.valid_chars)\n        self.assertEqual(tag.childNodes[0].data, self.valid_chars)\n\n    def test_append_tag_with_invalid_unicode_values(self):\n        self.builder.append('tag', self.invalid_chars, attr=self.invalid_chars)\n        self.builder.end_context()\n\n        root_child = self.doc.childNodes[0]\n        tag = root_child.childNodes[0]\n\n        self.assertEqual(tag.tagName, 'tag')\n        self.assertEqual(tag.getAttribute('attr'), self.invalid_chars_replace)\n        self.assertEqual(tag.childNodes[0].data, self.invalid_chars_replace)\n\n    def test_increment_root_context_counter(self):\n        self.builder.increment_counter('tests')\n        self.builder.end_context()\n\n        root_child = self.doc.childNodes[0]\n\n        
self.assertEqual(root_child.tagName, 'testsuites')\n        self.assertEqual(root_child.getAttribute('tests'), '1')\n\n    def test_increment_nested_context_counter(self):\n        self.builder.increment_counter('tests')\n\n        self.builder.begin_context('testsuite', 'name')\n        self.builder.increment_counter('tests')\n\n        self.builder.end_context()\n        self.builder.end_context()\n\n        root_child = self.doc.childNodes[0]\n        nested_child = root_child.childNodes[0]\n\n        self.assertEqual(root_child.tagName, 'testsuites')\n        self.assertEqual(nested_child.getAttribute('tests'), '1')\n        self.assertEqual(root_child.getAttribute('tests'), '2')\n\n    def test_finish_nested_context(self):\n        self.builder.begin_context('testsuite', 'name')\n\n        tree = ET.fromstring(self.builder.finish())\n\n        self.assertEqual(tree.tag, 'testsuites')\n        self.assertEqual(len(tree.findall(\"./testsuite\")), 1)\n"
  },
  {
    "path": "tests/discovery_test.py",
    "content": "import unittest\n\n\nclass DiscoveryTest(unittest.TestCase):\n    def test_discovery_pass(self):\n        pass\n"
  },
  {
    "path": "tests/django_example/app/__init__.py",
    "content": ""
  },
  {
    "path": "tests/django_example/app/admin.py",
    "content": "from django.contrib import admin  # NOQA\n\n# Register your models here.\n"
  },
  {
    "path": "tests/django_example/app/migrations/__init__.py",
    "content": ""
  },
  {
    "path": "tests/django_example/app/models.py",
    "content": "from django.db import models  # NOQA\n\n# Create your models here.\n"
  },
  {
    "path": "tests/django_example/app/tests.py",
    "content": "from django.test import TestCase\n\n\n# Create your tests here.\nclass DummyTestCase(TestCase):\n    def test_pass(self):\n        \"\"\"Test Pass\"\"\"\n        pass\n\n    def test_negative_comment1(self):\n        \"\"\"Use a close comment XML tag -->\"\"\"\n        pass\n\n    def test_negative_comment2(self):\n        \"\"\"Check XML tag </testsuites>\"\"\"\n        pass\n"
  },
  {
    "path": "tests/django_example/app/views.py",
    "content": "from django.shortcuts import render  # NOQA\n\n# Create your views here.\n"
  },
  {
    "path": "tests/django_example/app2/__init__.py",
    "content": ""
  },
  {
    "path": "tests/django_example/app2/admin.py",
    "content": "from django.contrib import admin  # NOQA\n\n# Register your models here.\n"
  },
  {
    "path": "tests/django_example/app2/migrations/__init__.py",
    "content": ""
  },
  {
    "path": "tests/django_example/app2/models.py",
    "content": "from django.db import models  # NOQA\n\n# Create your models here.\n"
  },
  {
    "path": "tests/django_example/app2/tests.py",
    "content": "from django.test import TestCase\n\n\n# Create your tests here.\nclass DummyTestCase(TestCase):\n    def test_pass(self):\n        pass\n"
  },
  {
    "path": "tests/django_example/app2/views.py",
    "content": "from django.shortcuts import render  # NOQA\n\n# Create your views here.\n"
  },
  {
    "path": "tests/django_example/example/__init__.py",
    "content": ""
  },
  {
    "path": "tests/django_example/example/settings.py",
    "content": "\nimport os\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = 'not-a-secret'\nDEBUG = True\nALLOWED_HOSTS = []\nINSTALLED_APPS = ['app', 'app2']\nMIDDLEWARE_CLASSES = []\nROOT_URLCONF = 'example.urls'\nTEMPLATES = []\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3',\n        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n    }\n}\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSTATIC_URL = '/static/'\n\n# The settings we care about for xmlrunner.\n# They are commented out because we will use settings.configure() in tests.\n\n# TEST_RUNNER = 'xmlrunner.extra.djangotestrunner.XMLTestRunner'\n# TEST_OUTPUT_FILE_NAME = 'results.xml'\n# TEST_OUTPUT_VERBOSE = 2\n"
  },
  {
    "path": "tests/django_example/example/urls.py",
    "content": "\nfrom django.conf.urls import url\nfrom django.contrib import admin\n\n\nurlpatterns = [\n    url(r'^admin/', admin.site.urls),\n]\n"
  },
  {
    "path": "tests/django_example/example/wsgi.py",
    "content": "\nimport os\nfrom django.core.wsgi import get_wsgi_application\n\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"example.settings\")\napplication = get_wsgi_application()\n"
  },
  {
    "path": "tests/django_example/manage.py",
    "content": "#!/usr/bin/env python\nimport os\nimport sys\n\n\nif __name__ == \"__main__\":\n    os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"example.settings\")\n    from django.core.management import execute_from_command_line\n    execute_from_command_line(sys.argv)\n"
  },
  {
    "path": "tests/django_test.py",
    "content": "from xmlrunner.unittest import unittest\n\nimport sys\nimport os\nfrom os import path\nimport glob\nfrom unittest import mock\nimport tempfile\nimport shutil\n\ntry:\n    import django\nexcept ImportError:\n    django = None\nelse:\n    from django.test.utils import get_runner\n    from django.conf import settings, UserSettingsHolder\n    from django.apps import apps\n    settings.configure(DEBUG=True)\n\n\nTESTS_DIR = path.dirname(__file__)\n\n\n@unittest.skipIf(django is None, 'django not found')\nclass DjangoTest(unittest.TestCase):\n\n    def setUp(self):\n        self._old_cwd = os.getcwd()\n        self.project_dir = path.abspath(path.join(TESTS_DIR, 'django_example'))\n        self.tmpdir = tempfile.mkdtemp()\n        os.chdir(self.project_dir)\n        sys.path.append(self.project_dir)\n        # allow changing settings\n        self.old_settings = settings._wrapped\n        os.environ['DJANGO_SETTINGS_MODULE'] = 'example.settings'\n        settings.INSTALLED_APPS  # load settings on first access\n        settings.DATABASES['default'] = {}\n        settings.DATABASES['default']['NAME'] = path.join(\n            self.tmpdir, 'db.sqlite3')\n        # this goes around the \"settings already loaded\" issue.\n        self.override = UserSettingsHolder(settings._wrapped)\n        settings._wrapped = self.override\n\n    def tearDown(self):\n        os.chdir(self._old_cwd)\n        shutil.rmtree(self.tmpdir)\n        settings._wrapped = self.old_settings\n\n    def _override_settings(self, **kwargs):\n        # see django.test.utils.override_settings\n        for key, new_value in kwargs.items():\n            setattr(self.override, key, new_value)\n\n    def _check_runner(self, runner):\n        suite = runner.build_suite(test_labels=['app2', 'app'])\n        test_ids = [test.id() for test in suite]\n        self.assertEqual(test_ids, [\n            'app2.tests.DummyTestCase.test_pass',\n            
'app.tests.DummyTestCase.test_negative_comment1',\n            'app.tests.DummyTestCase.test_negative_comment2',\n            'app.tests.DummyTestCase.test_pass',\n        ])\n        suite = runner.build_suite(test_labels=[])\n        test_ids = [test.id() for test in suite]\n        self.assertEqual(set(test_ids), set([\n            'app.tests.DummyTestCase.test_pass',\n            'app.tests.DummyTestCase.test_negative_comment1',\n            'app.tests.DummyTestCase.test_negative_comment2',\n            'app2.tests.DummyTestCase.test_pass',\n        ]))\n\n    def test_django_runner(self):\n        runner_class = get_runner(settings)\n        runner = runner_class()\n        self._check_runner(runner)\n\n    def test_django_xmlrunner(self):\n        self._override_settings(\n            TEST_RUNNER='xmlrunner.extra.djangotestrunner.XMLTestRunner')\n        runner_class = get_runner(settings)\n        runner = runner_class()\n        self._check_runner(runner)\n\n    def test_django_verbose(self):\n        self._override_settings(\n            TEST_OUTPUT_VERBOSE=True,\n            TEST_RUNNER='xmlrunner.extra.djangotestrunner.XMLTestRunner')\n        runner_class = get_runner(settings)\n        runner = runner_class()\n        self._check_runner(runner)\n\n    def test_django_single_report(self):\n        self._override_settings(\n            TEST_OUTPUT_DIR=self.tmpdir,\n            TEST_OUTPUT_FILE_NAME='results.xml',\n            TEST_OUTPUT_VERBOSE=0,\n            TEST_RUNNER='xmlrunner.extra.djangotestrunner.XMLTestRunner')\n        apps.populate(settings.INSTALLED_APPS)\n        runner_class = get_runner(settings)\n        runner = runner_class()\n        suite = runner.build_suite()\n        runner.run_suite(suite)\n        expected_file = path.join(self.tmpdir, 'results.xml')\n        self.assertTrue(path.exists(expected_file),\n                        'did not generate xml report where expected.')\n\n    def 
test_django_single_report_create_folder(self):\n        intermediate_directory = 'report'\n        directory = path.join(self.tmpdir, intermediate_directory)\n        self._override_settings(\n            TEST_OUTPUT_DIR=directory,\n            TEST_OUTPUT_FILE_NAME='results.xml',\n            TEST_OUTPUT_VERBOSE=0,\n            TEST_RUNNER='xmlrunner.extra.djangotestrunner.XMLTestRunner')\n        apps.populate(settings.INSTALLED_APPS)\n        runner_class = get_runner(settings)\n        runner = runner_class()\n        suite = runner.build_suite()\n        runner.run_suite(suite)\n        expected_file = path.join(directory, 'results.xml')\n        self.assertTrue(path.exists(expected_file),\n                        'did not generate xml report where expected.')\n\n    def test_django_multiple_reports(self):\n        self._override_settings(\n            TEST_OUTPUT_DIR=self.tmpdir,\n            TEST_OUTPUT_VERBOSE=0,\n            TEST_RUNNER='xmlrunner.extra.djangotestrunner.XMLTestRunner')\n        apps.populate(settings.INSTALLED_APPS)\n        runner_class = get_runner(settings)\n        runner = runner_class()\n        suite = runner.build_suite(test_labels=None)\n        runner.run_suite(suite)\n        test_files = glob.glob(path.join(self.tmpdir, 'TEST*.xml'))\n        self.assertTrue(test_files,\n                        'did not generate xml reports where expected.')\n        self.assertEqual(2, len(test_files))\n\n    def test_django_runner_extension(self):\n        from xmlrunner.extra.djangotestrunner import XMLTestRunner\n\n        class MyDjangoRunner(XMLTestRunner):\n            test_runner = mock.Mock()\n        \n        self._override_settings(\n            TEST_OUTPUT_DIR=self.tmpdir,\n            TEST_OUTPUT_VERBOSE=0)\n        apps.populate(settings.INSTALLED_APPS)\n\n        runner = MyDjangoRunner()\n        suite = runner.build_suite(test_labels=None)\n        runner.run_suite(suite)\n        \n        
self.assertTrue(MyDjangoRunner.test_runner.called)\n"
  },
  {
    "path": "tests/doctest_example.py",
    "content": "\ndef twice(n):\n    \"\"\"\n    >>> twice(5)\n    10\n    \"\"\"\n    return 2 * n\n\n\nclass Multiplicator(object):\n    def threetimes(self, n):\n        \"\"\"\n        >>> Multiplicator().threetimes(5)\n        15\n        \"\"\"\n        return 3 * n\n"
  },
  {
    "path": "tests/testsuite.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Executable module to test unittest-xml-reporting.\n\"\"\"\nfrom __future__ import print_function\n\nimport contextlib\nimport io\nimport sys\n\nfrom xmlrunner.unittest import unittest\nimport xmlrunner\nfrom xmlrunner.result import _DuplicateWriter\nfrom xmlrunner.result import _XMLTestResult\nfrom xmlrunner.result import resolve_filename\nimport doctest\nimport tests.doctest_example\nfrom io import StringIO, BytesIO\nfrom tempfile import mkdtemp\nfrom tempfile import mkstemp\nfrom shutil import rmtree\nfrom glob import glob\nfrom xml.dom import minidom\nfrom lxml import etree\nimport os\nimport os.path\nfrom unittest import mock\n\n\ndef _load_schema(version):\n    path = os.path.join(\n        os.path.dirname(__file__),\n        'vendor/jenkins/xunit-plugin', version, 'junit-10.xsd')\n    with open(path, 'r') as schema_file:\n        schema_doc = etree.parse(schema_file)\n        schema = etree.XMLSchema(schema_doc)\n        return schema\n    raise RuntimeError('Could not load JUnit schema')  # pragma: no cover\n\n\ndef validate_junit_report(version, text):\n    document = etree.parse(BytesIO(text))\n    schema = _load_schema(version)\n    schema.assertValid(document)\n\n\nclass DoctestTest(unittest.TestCase):\n\n    def test_doctest_example(self):\n        suite = doctest.DocTestSuite(tests.doctest_example)\n        outdir = BytesIO()\n        stream = StringIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=stream, output=outdir, verbosity=0)\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        self.assertIn('classname=\"tests.doctest_example.Multiplicator\"'.encode('utf8'), output)\n        self.assertIn('name=\"threetimes\"'.encode('utf8'), output)\n        self.assertIn('classname=\"tests.doctest_example\"'.encode('utf8'), output)\n        self.assertIn('name=\"twice\"'.encode('utf8'), output)\n\n\n@contextlib.contextmanager\ndef 
capture_stdout_stderr():\n    \"\"\"\n    context manager to capture stdout and stderr\n    \"\"\"\n    orig_stdout = sys.stdout\n    orig_stderr = sys.stderr\n    sys.stdout = StringIO()\n    sys.stderr = StringIO()\n    try:\n        yield (sys.stdout, sys.stderr)\n    finally:\n        sys.stdout = orig_stdout\n        sys.stderr = orig_stderr\n\n\ndef _strip_xml(xml, changes):\n    doc = etree.fromstring(xml)\n    for xpath, attributes in changes.items():\n        for node in doc.xpath(xpath):\n            for attrib in node.attrib.keys():\n                if attrib not in attributes:\n                    del node.attrib[attrib]\n    return etree.tostring(doc)\n\n\ndef some_decorator(f):\n    # for issue #195\n    code = \"\"\"\\\ndef wrapper(*args, **kwargs):\n    return func(*args, **kwargs)\n\"\"\"\n    evaldict = dict(func=f)\n    exec(code, evaldict)\n    return evaldict['wrapper']\n\n\nclass XMLTestRunnerTestCase(unittest.TestCase):\n    \"\"\"\n    XMLTestRunner test case.\n    \"\"\"\n    class DummyTest(unittest.TestCase):\n\n        @unittest.skip(\"demonstrating skipping\")\n        def test_skip(self):\n            pass   # pragma: no cover\n\n        @unittest.skip(u\"demonstrating non-ascii skipping: éçà\")\n        def test_non_ascii_skip(self):\n            pass   # pragma: no cover\n\n        def test_pass(self):\n            pass\n\n        def test_fail(self):\n            self.assertTrue(False)\n\n        @unittest.expectedFailure\n        def test_expected_failure(self):\n            self.assertTrue(False)\n\n        @unittest.expectedFailure\n        def test_unexpected_success(self):\n            pass\n\n        def test_error(self):\n            1 / 0\n\n        def test_cdata_section(self):\n            print('<![CDATA[content]]>')\n\n        def test_invalid_xml_chars_in_doc(self):\n            \"\"\"\n            Testing comments, -- is not allowed, or invalid xml 1.0 chars such as \\x0c\n            \"\"\"\n            pass\n\n       
 def test_non_ascii_error(self):\n            self.assertEqual(u\"éçà\", 42)\n\n        def test_unsafe_unicode(self):\n            print(u\"A\\x00B\\x08C\\x0BD\\x0C\")\n\n        def test_output_stdout_and_stderr(self):\n            print('test on stdout')\n            print('test on stderr', file=sys.stderr)\n\n        def test_runner_buffer_output_pass(self):\n            print('should not be printed')\n\n        def test_runner_buffer_output_fail(self):\n            print('should be printed')\n            self.fail('expected to fail')\n\n        def test_output(self):\n            print('test message')\n\n        def test_non_ascii_runner_buffer_output_fail(self):\n            print(u'Where is the café ?')\n            self.fail(u'The café could not be found')\n\n    class DummySubTest(unittest.TestCase):\n\n        def test_subTest_pass(self):\n            for i in range(2):\n                with self.subTest(i=i):\n                    pass\n\n        def test_subTest_fail(self):\n            for i in range(2):\n                with self.subTest(i=i):\n                    self.fail('this is a subtest.')\n\n        def test_subTest_error(self):\n            for i in range(2):\n                with self.subTest(i=i):\n                    raise Exception('this is a subtest')\n\n        def test_subTest_mixed(self):\n            for i in range(2):\n                with self.subTest(i=i):\n                    self.assertLess(i, 1, msg='this is a subtest.')\n\n        def test_subTest_with_dots(self):\n            for i in range(2):\n                with self.subTest(module='hello.world.subTest{}'.format(i)):\n                    self.fail('this is a subtest.')\n\n    class DecoratedUnitTest(unittest.TestCase):\n\n        @some_decorator\n        def test_pass(self):\n            pass\n\n    class DummyErrorInCallTest(unittest.TestCase):\n\n        def __call__(self, result):\n            try:\n                raise Exception('Massive fail')\n            except 
Exception:\n                result.addError(self, sys.exc_info())\n                return\n\n        def test_pass(self):\n            # it is expected not to be called.\n            pass  # pragma: no cover\n\n    class DummyRefCountTest(unittest.TestCase):\n        class dummy(object):\n            pass\n        def test_fail(self):\n            inst = self.dummy()\n            self.assertTrue(False)\n\n    def setUp(self):\n        self.stream = StringIO()\n        self.outdir = mkdtemp()\n        self.verbosity = 0\n        self.runner_kwargs = {}\n        self.addCleanup(rmtree, self.outdir)\n\n    def _test_xmlrunner(self, suite, runner=None, outdir=None):\n        if outdir is None:\n            outdir = self.outdir\n        stream = self.stream\n        verbosity = self.verbosity\n        runner_kwargs = self.runner_kwargs\n        if runner is None:\n            runner = xmlrunner.XMLTestRunner(\n                stream=stream, output=outdir, verbosity=verbosity,\n                **runner_kwargs)\n        if isinstance(outdir, BytesIO):\n            self.assertFalse(outdir.getvalue())\n        else:\n            self.assertEqual(0, len(glob(os.path.join(outdir, '*xml'))))\n        runner.run(suite)\n        if isinstance(outdir, BytesIO):\n            self.assertTrue(outdir.getvalue())\n        else:\n            self.assertEqual(1, len(glob(os.path.join(outdir, '*xml'))))\n        return runner\n\n    def test_basic_unittest_constructs(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        suite.addTest(self.DummyTest('test_skip'))\n        suite.addTest(self.DummyTest('test_fail'))\n        suite.addTest(self.DummyTest('test_expected_failure'))\n        suite.addTest(self.DummyTest('test_unexpected_success'))\n        suite.addTest(self.DummyTest('test_error'))\n        self._test_xmlrunner(suite)\n\n    def test_classnames(self):\n        suite = unittest.TestSuite()\n        
suite.addTest(self.DummyTest('test_pass'))\n        suite.addTest(self.DummySubTest('test_subTest_pass'))\n        outdir = BytesIO()\n        stream = StringIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=stream, output=outdir, verbosity=0)\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        output = _strip_xml(output, {\n            '//testsuite': (),\n            '//testcase': ('classname', 'name'),\n            '//failure': ('message',),\n        })\n        self.assertRegex(\n            output,\n            r'classname=\"tests\\.testsuite\\.(XMLTestRunnerTestCase\\.)?'\n            r'DummyTest\" name=\"test_pass\"'.encode('utf8'),\n        )\n        self.assertRegex(\n            output,\n            r'classname=\"tests\\.testsuite\\.(XMLTestRunnerTestCase\\.)?'\n            r'DummySubTest\" name=\"test_subTest_pass\"'.encode('utf8'),\n        )\n\n    def test_expected_failure(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_expected_failure'))\n        outdir = BytesIO()\n\n        self._test_xmlrunner(suite, outdir=outdir)\n\n        self.assertNotIn(b'<failure', outdir.getvalue())\n        self.assertNotIn(b'<error', outdir.getvalue())\n        self.assertIn(b'<skip', outdir.getvalue())\n\n    def test_unexpected_success(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_unexpected_success'))\n        outdir = BytesIO()\n\n        self._test_xmlrunner(suite, outdir=outdir)\n\n        self.assertNotIn(b'<failure', outdir.getvalue())\n        self.assertIn(b'<error', outdir.getvalue())\n        self.assertNotIn(b'<skip', outdir.getvalue())\n\n    def test_xmlrunner_non_ascii(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_non_ascii_skip'))\n        suite.addTest(self.DummyTest('test_non_ascii_error'))\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n         
   stream=self.stream, output=outdir, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        self.assertIn(\n            u'message=\"demonstrating non-ascii skipping: éçà\"'.encode('utf8'),\n            output)\n\n    def test_xmlrunner_safe_xml_encoding_name(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=self.stream, output=outdir, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        firstline = output.splitlines()[0]\n        # test for issue #74\n        self.assertIn('encoding=\"UTF-8\"'.encode('utf8'), firstline)\n\n    def test_xmlrunner_check_for_valid_xml_streamout(self):\n        \"\"\"\n        This test checks if the xml document is valid if there are more than\n        one testsuite and the output of the report is a single stream.\n        \"\"\"\n        class DummyTestA(unittest.TestCase):\n\n            def test_pass(self):\n                pass\n\n        class DummyTestB(unittest.TestCase):\n\n            def test_pass(self):\n                pass\n\n        suite = unittest.TestSuite()\n        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(DummyTestA))\n        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(DummyTestB))\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=self.stream, output=outdir, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        # Finally check if we have a valid XML document or not.\n        try:\n            minidom.parseString(output)\n        except Exception as e:  # pragma: no cover\n            # note: we could remove the 
try/except, but it's more crude.\n            self.fail(e)\n\n    def test_xmlrunner_unsafe_unicode(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_unsafe_unicode'))\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=self.stream, output=outdir, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        self.assertIn(u\"<![CDATA[ABCD\\n]]>\".encode('utf8'),\n                      output)\n\n    def test_xmlrunner_non_ascii_failures(self):\n        self._xmlrunner_non_ascii_failures()\n\n    def test_xmlrunner_non_ascii_failures_buffered_output(self):\n        self._xmlrunner_non_ascii_failures(buffer=True)\n\n    def _xmlrunner_non_ascii_failures(self, buffer=False):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest(\n            'test_non_ascii_runner_buffer_output_fail'))\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=self.stream, output=outdir, verbosity=self.verbosity,\n            buffer=buffer, **self.runner_kwargs)\n\n        # allow output non-ascii letters to stdout\n        orig_stdout = sys.stdout\n        sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\n\n        try:\n            runner.run(suite)\n        finally:\n            # Not to be closed when TextIOWrapper is disposed.\n            sys.stdout.detach()\n            sys.stdout = orig_stdout\n        outdir.seek(0)\n        output = outdir.read()\n        self.assertIn(\n            u'Where is the café ?'.encode('utf8'),\n            output)\n        self.assertIn(\n            u'The café could not be found'.encode('utf8'),\n            output)\n\n    @unittest.expectedFailure\n    def test_xmlrunner_buffer_output_pass(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_runner_buffer_output_pass'))\n   
     self._test_xmlrunner(suite)\n        testsuite_output = self.stream.getvalue()\n        # Since we are always buffering stdout/stderr\n        # it is currently troublesome to print anything at all\n        # and be consistent with --buffer option (issue #59)\n        self.assertIn('should not be printed', testsuite_output)\n        # this will be fixed when using the composite approach\n        # that was under development in the rewrite branch.\n\n    def test_xmlrunner_buffer_output_fail(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_runner_buffer_output_fail'))\n        # --buffer option\n        self.runner_kwargs['buffer'] = True\n        self._test_xmlrunner(suite)\n        testsuite_output = self.stream.getvalue()\n        self.assertIn('should be printed', testsuite_output)\n\n    def test_xmlrunner_output_without_buffer(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_output'))\n        with capture_stdout_stderr() as r:\n            self._test_xmlrunner(suite)\n        output_from_test = r[0].getvalue()\n        self.assertIn('test message', output_from_test)\n\n    def test_xmlrunner_output_with_buffer(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_output'))\n        # --buffer option\n        self.runner_kwargs['buffer'] = True\n        with capture_stdout_stderr() as r:\n            self._test_xmlrunner(suite)\n        output_from_test = r[0].getvalue()\n        self.assertNotIn('test message', output_from_test)\n\n    def test_xmlrunner_stdout_stderr_recovered_without_buffer(self):\n        orig_stdout = sys.stdout\n        orig_stderr = sys.stderr\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        self._test_xmlrunner(suite)\n        self.assertIs(orig_stdout, sys.stdout)\n        self.assertIs(orig_stderr, sys.stderr)\n\n    def 
test_xmlrunner_stdout_stderr_recovered_with_buffer(self):\n        orig_stdout = sys.stdout\n        orig_stderr = sys.stderr\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        # --buffer option\n        self.runner_kwargs['buffer'] = True\n        self._test_xmlrunner(suite)\n        self.assertIs(orig_stdout, sys.stdout)\n        self.assertIs(orig_stderr, sys.stderr)\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n\n    @unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),\n                     'unittest.TestCase.subTest not present.')\n    def test_unittest_subTest_fail(self):\n        # test for issue #77\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=self.stream, output=outdir, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummySubTest('test_subTest_fail'))\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        output = _strip_xml(output, {\n            '//testsuite': (),\n            '//testcase': ('classname', 'name'),\n            '//failure': ('message',),\n        })\n        self.assertRegex(\n            output,\n            br'<testcase classname=\"tests\\.testsuite\\.'\n            br'(XMLTestRunnerTestCase\\.)?DummySubTest\" '\n            br'name=\"test_subTest_fail \\(i=0\\)\"')\n        self.assertRegex(\n            output,\n            br'<testcase classname=\"tests\\.testsuite\\.'\n            br'(XMLTestRunnerTestCase\\.)?DummySubTest\" '\n            br'name=\"test_subTest_fail \\(i=1\\)\"')\n\n    @unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),\n                     'unittest.TestCase.subTest not present.')\n    def test_unittest_subTest_error(self):\n        # test for issue #155\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            
stream=self.stream, output=outdir, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummySubTest('test_subTest_error'))\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        output = _strip_xml(output, {\n            '//testsuite': (),\n            '//testcase': ('classname', 'name'),\n            '//failure': ('message',),\n        })\n        self.assertRegex(\n            output,\n            br'<testcase classname=\"tests\\.testsuite\\.'\n            br'(XMLTestRunnerTestCase\\.)?DummySubTest\" '\n            br'name=\"test_subTest_error \\(i=0\\)\"')\n        self.assertRegex(\n            output,\n            br'<testcase classname=\"tests\\.testsuite\\.'\n            br'(XMLTestRunnerTestCase\\.)?DummySubTest\" '\n            br'name=\"test_subTest_error \\(i=1\\)\"')\n\n\n    @unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),\n                     'unittest.TestCase.subTest not present.')\n    def test_unittest_subTest_mixed(self):\n        # test for issue #155\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=self.stream, output=outdir, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummySubTest('test_subTest_mixed'))\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        output = _strip_xml(output, {\n            '//testsuite': (),\n            '//testcase': ('classname', 'name'),\n            '//failure': ('message',),\n        })\n        self.assertNotIn(\n            'name=\"test_subTest_mixed (i=0)\"'.encode('utf8'),\n            output)\n        self.assertIn(\n            'name=\"test_subTest_mixed (i=1)\"'.encode('utf8'),\n            output)\n\n    @unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),\n                     'unittest.TestCase.subTest not 
present.')\n    def test_unittest_subTest_pass(self):\n        # Test for issue #85\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummySubTest('test_subTest_pass'))\n        self._test_xmlrunner(suite)\n\n    @unittest.skipIf(not hasattr(unittest.TestCase, 'subTest'),\n                     'unittest.TestCase.subTest not present.')\n    def test_unittest_subTest_with_dots(self):\n        # Test for issue #85\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummySubTest('test_subTest_with_dots'))\n        outdir = BytesIO()\n\n        self._test_xmlrunner(suite, outdir=outdir)\n\n        xmlcontent = outdir.getvalue().decode()\n\n        # Method name\n        self.assertNotIn('name=\"subTest', xmlcontent, 'parsing of test method name is not done correctly')\n        self.assertIn('name=\"test_subTest_with_dots (module=\\'hello.world.subTest', xmlcontent)\n\n        # Class name\n        matchString = 'classname=\"tests.testsuite.XMLTestRunnerTestCase.DummySubTest.test_subTest_with_dots (module=\\'hello.world\"'\n        self.assertNotIn(matchString, xmlcontent, 'parsing of class name is not done correctly')\n        self.assertIn('classname=\"tests.testsuite.XMLTestRunnerTestCase.DummySubTest\"', xmlcontent)\n\n    def test_xmlrunner_pass(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        self._test_xmlrunner(suite)\n\n    def test_xmlrunner_failfast(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_fail'))\n        suite.addTest(self.DummyTest('test_pass'))\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=self.stream, output=outdir,\n            verbosity=self.verbosity, failfast=True,\n            **self.runner_kwargs)\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        self.assertIn('test_fail'.encode('utf8'), output)\n        
self.assertNotIn('test_pass'.encode('utf8'), output)\n\n    def test_xmlrunner_verbose(self):\n        self.verbosity = 1\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        self._test_xmlrunner(suite)\n\n    def test_xmlrunner_showall(self):\n        self.verbosity = 2\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        self._test_xmlrunner(suite)\n\n    def test_xmlrunner_cdata_section(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_cdata_section'))\n        self._test_xmlrunner(suite)\n\n    def test_xmlrunner_invalid_xml_chars_in_doc(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_invalid_xml_chars_in_doc'))\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=self.stream, output=outdir, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        # Finally check if we have a valid XML document or not.\n        try:\n            minidom.parseString(output)\n        except Exception as e:  # pragma: no cover\n            # note: we could remove the try/except, but it's more crude.\n            self.fail(e)\n\n    def test_xmlrunner_outsuffix(self):\n        self.runner_kwargs['outsuffix'] = '.somesuffix'\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        self._test_xmlrunner(suite)\n        xmlfile = glob(os.path.join(self.outdir, '*xml'))[0]\n        assert xmlfile.endswith('.somesuffix.xml')\n\n    def test_xmlrunner_nosuffix(self):\n        self.runner_kwargs['outsuffix'] = ''\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        self._test_xmlrunner(suite)\n        xmlfile = glob(os.path.join(self.outdir, '*xml'))[0]\n        xmlfile = os.path.basename(xmlfile)\n     
   assert xmlfile.endswith('DummyTest.xml')\n\n    def test_junitxml_properties(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        suite.properties = dict(key='value')\n        self._test_xmlrunner(suite)\n\n    def test_junitxml_xsd_validation_order(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_fail'))\n        suite.addTest(self.DummyTest('test_pass'))\n        suite.addTest(self.DummyTest('test_output_stdout_and_stderr'))\n        suite.properties = dict(key='value')\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=self.stream, output=outdir, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        # poor man's schema validation; see issue #90\n        i_properties = output.index('<properties>'.encode('utf8'))\n        i_system_out = output.index('<system-out>'.encode('utf8'))\n        i_system_err = output.index('<system-err>'.encode('utf8'))\n        i_testcase = output.index('<testcase'.encode('utf8'))\n        self.assertTrue(i_properties < i_testcase <\n                        i_system_out < i_system_err)\n        # XSD validation - for good measure.\n        validate_junit_report('14c6e39c38408b9ed6280361484a13c6f5becca7', output)\n\n    def test_junitxml_xsd_validation_empty_properties(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_fail'))\n        suite.addTest(self.DummyTest('test_pass'))\n        suite.properties = None\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=self.stream, output=outdir, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n        self.assertNotIn('<properties>'.encode('utf8'), output)\n        
validate_junit_report('14c6e39c38408b9ed6280361484a13c6f5becca7', output)\n\n    @unittest.skipIf(hasattr(sys, 'pypy_version_info'),\n                     'skip - PyPy + lxml seems to be hanging')\n    def test_xunit_plugin_transform(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_fail'))\n        suite.addTest(self.DummyTest('test_pass'))\n        suite.properties = None\n        outdir = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=self.stream, output=outdir, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        runner.run(suite)\n        outdir.seek(0)\n        output = outdir.read()\n\n        validate_junit_report('14c6e39c38408b9ed6280361484a13c6f5becca7', output)\n        with self.assertRaises(etree.DocumentInvalid):\n            validate_junit_report('ae25da5089d4f94ac6c4669bf736e4d416cc4665', output)\n\n        from xmlrunner.extra.xunit_plugin import transform\n        transformed = transform(output)\n        validate_junit_report('14c6e39c38408b9ed6280361484a13c6f5becca7', transformed)\n        validate_junit_report('ae25da5089d4f94ac6c4669bf736e4d416cc4665', transformed)\n        self.assertIn('test_pass'.encode('utf8'), transformed)\n        self.assertIn('test_fail'.encode('utf8'), transformed)\n\n    def test_xmlrunner_elapsed_times(self):\n        self.runner_kwargs['elapsed_times'] = False\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        self._test_xmlrunner(suite)\n\n    def test_xmlrunner_resultclass(self):\n        class Result(_XMLTestResult):\n            pass\n\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        self.runner_kwargs['resultclass'] = Result\n        self._test_xmlrunner(suite)\n\n    def test_xmlrunner_stream(self):\n        stream = self.stream\n        output = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=stream, 
output=output, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        runner.run(suite)\n\n    def test_xmlrunner_stream_empty_testsuite(self):\n        stream = self.stream\n        output = BytesIO()\n        runner = xmlrunner.XMLTestRunner(\n            stream=stream, output=output, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        suite = unittest.TestSuite()\n        runner.run(suite)\n\n    def test_xmlrunner_output_subdir(self):\n        stream = self.stream\n        output = os.path.join(self.outdir, 'subdir')\n        runner = xmlrunner.XMLTestRunner(\n            stream=stream, output=output, verbosity=self.verbosity,\n            **self.runner_kwargs)\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyTest('test_pass'))\n        runner.run(suite)\n\n    def test_xmlrunner_patched_stdout(self):\n        old_stdout, old_stderr = sys.stdout, sys.stderr\n        try:\n            sys.stdout, sys.stderr = StringIO(), StringIO()\n            suite = unittest.TestSuite()\n            suite.addTest(self.DummyTest('test_pass'))\n            suite.properties = dict(key='value')\n            self._test_xmlrunner(suite)\n        finally:\n            sys.stdout, sys.stderr = old_stdout, old_stderr\n\n    def test_opaque_decorator(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DecoratedUnitTest('test_pass'))\n        self._test_xmlrunner(suite)\n        testsuite_output = self.stream.getvalue()\n        self.assertNotIn('IOError:', testsuite_output)\n\n    def test_xmlrunner_error_in_call(self):\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyErrorInCallTest('test_pass'))\n        self._test_xmlrunner(suite)\n        testsuite_output = self.stream.getvalue()\n        self.assertIn('Exception: Massive fail', testsuite_output)\n\n    @unittest.skipIf(not 
unittest.BaseTestSuite._cleanup,\n                     'skip - do not cleanup')\n    @unittest.skipIf(not hasattr(sys, 'getrefcount'),\n                     'skip - PyPy does not have sys.getrefcount.')\n    @unittest.skipIf((3, 0) <= sys.version_info < (3, 4),\n                     'skip - test not garbage collected. '\n                     'https://bugs.python.org/issue11798.')\n    def test_xmlrunner_hold_traceback(self):\n        import gc\n\n        suite = unittest.TestSuite()\n        suite.addTest(self.DummyRefCountTest('test_fail'))\n        countBeforeTest = sys.getrefcount(self.DummyRefCountTest.dummy)\n        runner = self._test_xmlrunner(suite)\n\n        gc.collect()\n        countAfterTest = sys.getrefcount(self.DummyRefCountTest.dummy)\n        self.assertEqual(countBeforeTest, countAfterTest)\n\n    class StderrXMLTestRunner(xmlrunner.XMLTestRunner):\n        \"\"\"\n        XMLTestRunner that outputs to sys.stderr that might be replaced\n\n        Though XMLTestRunner defaults to use sys.stderr as stream,\n        it cannot be replaced e.g. 
by replaced by capture_stdout_stderr(),\n        as it's already resolved.\n        This class resolved sys.stderr lazily and outputs to replaced sys.stderr.\n        \"\"\"\n        def __init__(self, **kwargs):\n            super(XMLTestRunnerTestCase.StderrXMLTestRunner, self).__init__(\n                stream=sys.stderr,\n                **kwargs\n            )\n\n    def test_test_program_succeed_with_buffer(self):\n        with capture_stdout_stderr() as r:\n            unittest.TestProgram(\n                module=self.__class__.__module__,\n                testRunner=self.StderrXMLTestRunner,\n                argv=[\n                    sys.argv[0],\n                    '-b',\n                    'XMLTestRunnerTestCase.DummyTest.test_runner_buffer_output_pass',\n                ],\n                exit=False,\n            )\n        self.assertNotIn('should not be printed', r[0].getvalue())\n        self.assertNotIn('should not be printed', r[1].getvalue())\n\n    def test_test_program_succeed_wo_buffer(self):\n        with capture_stdout_stderr() as r:\n            unittest.TestProgram(\n                module=self.__class__.__module__,\n                testRunner=self.StderrXMLTestRunner,\n                argv=[\n                    sys.argv[0],\n                    'XMLTestRunnerTestCase.DummyTest.test_runner_buffer_output_pass',\n                ],\n                exit=False,\n            )\n        self.assertIn('should not be printed', r[0].getvalue())\n        self.assertNotIn('should not be printed', r[1].getvalue())\n\n    def test_test_program_fail_with_buffer(self):\n        with capture_stdout_stderr() as r:\n            unittest.TestProgram(\n                module=self.__class__.__module__,\n                testRunner=self.StderrXMLTestRunner,\n                argv=[\n                    sys.argv[0],\n                    '-b',\n                    'XMLTestRunnerTestCase.DummyTest.test_runner_buffer_output_fail',\n                ],\n          
      exit=False,\n            )\n        self.assertNotIn('should be printed', r[0].getvalue())\n        self.assertIn('should be printed', r[1].getvalue())\n\n    def test_test_program_fail_wo_buffer(self):\n        with capture_stdout_stderr() as r:\n            unittest.TestProgram(\n                module=self.__class__.__module__,\n                testRunner=self.StderrXMLTestRunner,\n                argv=[\n                    sys.argv[0],\n                    'XMLTestRunnerTestCase.DummyTest.test_runner_buffer_output_fail',\n                ],\n                exit=False,\n            )\n        self.assertIn('should be printed', r[0].getvalue())\n        self.assertNotIn('should be printed', r[1].getvalue())\n\n    def test_partialmethod(self):\n        from functools import partialmethod\n        def test_partialmethod(test):\n            pass\n        class TestWithPartialmethod(unittest.TestCase):\n            pass\n        setattr(\n            TestWithPartialmethod,\n            'test_partialmethod',\n            partialmethod(test_partialmethod),\n        )\n        suite = unittest.TestSuite()\n        suite.addTest(TestWithPartialmethod('test_partialmethod'))\n        self._test_xmlrunner(suite)\n\n\n\nclass DuplicateWriterTestCase(unittest.TestCase):\n    def setUp(self):\n        fd, self.file = mkstemp()\n        self.fh = os.fdopen(fd, 'w')\n        self.buffer = StringIO()\n        self.writer = _DuplicateWriter(self.fh, self.buffer)\n\n    def tearDown(self):\n        self.buffer.close()\n        self.fh.close()\n        os.unlink(self.file)\n\n    def getFirstContent(self):\n        with open(self.file, 'r') as f:\n            return f.read()\n\n    def getSecondContent(self):\n        return self.buffer.getvalue()\n\n    def test_flush(self):\n        self.writer.write('foobarbaz')\n        self.writer.flush()\n        self.assertEqual(self.getFirstContent(), self.getSecondContent())\n\n    def test_writable(self):\n        
self.assertTrue(self.writer.writable())\n\n    def test_writelines(self):\n        self.writer.writelines([\n            'foo\\n',\n            'bar\\n',\n            'baz\\n',\n        ])\n        self.writer.flush()\n        self.assertEqual(self.getFirstContent(), self.getSecondContent())\n\n    def test_write(self):\n        # try long buffer (1M)\n        buffer = 'x' * (1024 * 1024)\n        wrote = self.writer.write(buffer)\n        self.writer.flush()\n        self.assertEqual(self.getFirstContent(), self.getSecondContent())\n        self.assertEqual(wrote, len(self.getSecondContent()))\n\n\nclass XMLProgramTestCase(unittest.TestCase):\n    @mock.patch('sys.argv', ['xmlrunner', '-o', 'flaf'])\n    @mock.patch('xmlrunner.runner.XMLTestRunner')\n    @mock.patch('sys.exit')\n    def test_xmlrunner_output(self, exiter, testrunner):\n        xmlrunner.runner.XMLTestProgram()\n\n        kwargs = dict(\n            buffer=mock.ANY,\n            failfast=mock.ANY,\n            verbosity=mock.ANY,\n            warnings=mock.ANY,\n            output='flaf',\n        )\n\n        if sys.version_info[:2] > (3, 4):\n            kwargs.update(tb_locals=mock.ANY)\n\n        testrunner.assert_called_once_with(**kwargs)\n        exiter.assert_called_once_with(False)\n\n    @mock.patch('sys.argv', ['xmlrunner', '--output-file', 'test.xml'])\n    @mock.patch('xmlrunner.runner.open')\n    @mock.patch('xmlrunner.runner.XMLTestRunner')\n    @mock.patch('sys.exit')\n    def test_xmlrunner_output_file(self, exiter, testrunner, opener):\n        xmlrunner.runner.XMLTestProgram()\n        opener.assert_called_once_with('test.xml', 'wb')\n        open_file = opener()\n        open_file.close.assert_called_with()\n\n        kwargs = dict(\n            buffer=mock.ANY,\n            failfast=mock.ANY,\n            verbosity=mock.ANY,\n            warnings=mock.ANY,\n            output=open_file,\n        )\n\n        if sys.version_info[:2] > (3, 4):\n            
kwargs.update(tb_locals=mock.ANY)\n\n        testrunner.assert_called_once_with(**kwargs)\n        exiter.assert_called_once_with(False)\n\n    @mock.patch('sys.argv', ['xmlrunner', '--outsuffix', ''])\n    @mock.patch('xmlrunner.runner.open')\n    @mock.patch('xmlrunner.runner.XMLTestRunner')\n    @mock.patch('sys.exit')\n    def test_xmlrunner_outsuffix(self, exiter, testrunner, opener):\n        xmlrunner.runner.XMLTestProgram()\n\n        kwargs = dict(\n            buffer=mock.ANY,\n            failfast=mock.ANY,\n            verbosity=mock.ANY,\n            warnings=mock.ANY,\n            outsuffix='',\n        )\n\n        if sys.version_info[:2] > (3, 4):\n            kwargs.update(tb_locals=mock.ANY)\n\n        testrunner.assert_called_once_with(**kwargs)\n        exiter.assert_called_once_with(False)\n\n\nclass ResolveFilenameTestCase(unittest.TestCase):\n    @mock.patch('os.path.relpath')\n    def test_resolve_filename_relative(self, relpath):\n        relpath.return_value = 'somefile.py'\n        filename = resolve_filename('/path/to/somefile.py')\n        self.assertEqual(filename, 'somefile.py')\n\n    @mock.patch('os.path.relpath')\n    def test_resolve_filename_outside(self, relpath):\n        relpath.return_value = '../../../tmp/somefile.py'\n        filename = resolve_filename('/tmp/somefile.py')\n        self.assertEqual(filename, '/tmp/somefile.py')\n\n    @mock.patch('os.path.relpath')\n    def test_resolve_filename_error(self, relpath):\n        relpath.side_effect = ValueError(\"ValueError: path is on mount 'C:', start on mount 'D:'\")\n        filename = resolve_filename('C:\\\\path\\\\to\\\\somefile.py')\n        self.assertEqual(filename, 'C:\\\\path\\\\to\\\\somefile.py')\n"
  },
  {
    "path": "tests/vendor/jenkins/xunit-plugin/14c6e39c38408b9ed6280361484a13c6f5becca7/junit-10.xsd",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!--\nThe MIT License (MIT)\n\nCopyright (c) 2014, Gregory Boissinot\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n-->\n<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n\n    <xs:element name=\"failure\">\n        <xs:complexType mixed=\"true\">\n            <xs:attribute name=\"type\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"message\" type=\"xs:string\" use=\"optional\"/>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"error\">\n        <xs:complexType mixed=\"true\">\n            <xs:attribute name=\"type\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"message\" type=\"xs:string\" use=\"optional\"/>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"skipped\">\n        <xs:complexType mixed=\"true\">\n            <xs:attribute name=\"type\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute 
name=\"message\" type=\"xs:string\" use=\"optional\"/>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"properties\">\n        <xs:complexType>\n            <xs:sequence>\n                <xs:element ref=\"property\" minOccurs=\"0\" maxOccurs=\"unbounded\"/>\n            </xs:sequence>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"property\">\n        <xs:complexType>\n            <xs:attribute name=\"name\" type=\"xs:string\" use=\"required\"/>\n            <xs:attribute name=\"value\" type=\"xs:string\" use=\"required\"/>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"system-err\" type=\"xs:string\"/>\n    <xs:element name=\"system-out\" type=\"xs:string\"/>\n\n    <xs:element name=\"testcase\">\n        <xs:complexType>\n            <xs:sequence>\n                <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n                    <xs:element ref=\"skipped\"/>\n                    <xs:element ref=\"error\"/>\n                    <xs:element ref=\"failure\"/>\n                    <xs:element ref=\"system-out\"/>\n                    <xs:element ref=\"system-err\"/>\n                </xs:choice>\n            </xs:sequence>\n            <xs:attribute name=\"name\" type=\"xs:string\" use=\"required\"/>\n            <xs:attribute name=\"assertions\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"time\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"timestamp\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"classname\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"status\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"class\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"file\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"line\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"log\" 
type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"group\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"url\" type=\"xs:string\" use=\"optional\"/>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"testsuite\">\n        <xs:complexType>\n            <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n                <xs:element ref=\"testsuite\"/>\n                <xs:element ref=\"properties\"/>\n                <xs:element ref=\"testcase\"/>\n                <xs:element ref=\"system-out\"/>\n                <xs:element ref=\"system-err\"/>\n            </xs:choice>\n            <xs:attribute name=\"name\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"tests\" type=\"xs:string\" use=\"required\"/>\n            <xs:attribute name=\"failures\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"errors\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"time\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"disabled\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"skipped\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"skips\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"timestamp\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"hostname\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"id\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"package\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"assertions\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"file\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"skip\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"log\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"url\" type=\"xs:string\" 
use=\"optional\"/>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"testsuites\">\n        <xs:complexType>\n            <xs:sequence>\n                <xs:element ref=\"testsuite\" minOccurs=\"0\" maxOccurs=\"unbounded\"/>\n            </xs:sequence>\n            <xs:attribute name=\"name\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"time\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"tests\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"failures\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"disabled\" type=\"xs:string\" use=\"optional\"/>\n            <xs:attribute name=\"errors\" type=\"xs:string\" use=\"optional\"/>\n        </xs:complexType>\n    </xs:element>\n\n</xs:schema>\n"
  },
  {
    "path": "tests/vendor/jenkins/xunit-plugin/ae25da5089d4f94ac6c4669bf736e4d416cc4665/junit-10.xsd",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!--\nThe MIT License (MIT)\n\nCopyright (c) 2014, Gregory Boissinot\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n-->\n<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n    <xs:simpleType name=\"SUREFIRE_TIME\">\n        <xs:restriction base=\"xs:string\">\n            <xs:pattern value=\"(([0-9]{0,3},)*[0-9]{3}|[0-9]{0,3})*(\\.[0-9]{0,3})?\"/>\n        </xs:restriction>\n    </xs:simpleType>\n\n    <xs:complexType name=\"rerunType\" mixed=\"true\"> <!-- mixed (XML contains text) to be compatible with version previous than 2.22.1 -->\n        <xs:sequence>\n            <xs:element name=\"stackTrace\" type=\"xs:string\" minOccurs=\"0\" /> <!-- optional to be compatible with version previous than 2.22.1 -->\n            <xs:element name=\"system-out\" type=\"xs:string\" minOccurs=\"0\" />\n            <xs:element name=\"system-err\" type=\"xs:string\" minOccurs=\"0\" />\n        </xs:sequence>\n        <xs:attribute 
name=\"message\" type=\"xs:string\" />\n        <xs:attribute name=\"type\" type=\"xs:string\" use=\"required\" />\n    </xs:complexType>\n\n    <xs:element name=\"failure\">\n        <xs:complexType mixed=\"true\">\n            <xs:attribute name=\"type\" type=\"xs:string\"/>\n            <xs:attribute name=\"message\" type=\"xs:string\"/>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"error\">\n        <xs:complexType mixed=\"true\">\n            <xs:attribute name=\"type\" type=\"xs:string\"/>\n            <xs:attribute name=\"message\" type=\"xs:string\"/>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"skipped\">\n        <xs:complexType mixed=\"true\">\n            <xs:attribute name=\"type\" type=\"xs:string\"/>\n            <xs:attribute name=\"message\" type=\"xs:string\"/>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"properties\">\n        <xs:complexType>\n            <xs:sequence>\n                <xs:element ref=\"property\" minOccurs=\"0\" maxOccurs=\"unbounded\"/>\n            </xs:sequence>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"property\">\n        <xs:complexType>\n            <xs:attribute name=\"name\" type=\"xs:string\" use=\"required\"/>\n            <xs:attribute name=\"value\" type=\"xs:string\" use=\"required\"/>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"system-err\" type=\"xs:string\"/>\n    <xs:element name=\"system-out\" type=\"xs:string\"/>\n    <xs:element name=\"rerunFailure\" type=\"rerunType\"/>\n    <xs:element name=\"rerunError\" type=\"rerunType\"/>\n    <xs:element name=\"flakyFailure\" type=\"rerunType\"/>\n    <xs:element name=\"flakyError\" type=\"rerunType\"/>\n\n    <xs:element name=\"testcase\">\n        <xs:complexType>\n            <xs:sequence>\n                <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n                    <xs:element ref=\"skipped\"/>\n                    
<xs:element ref=\"error\"/>\n                    <xs:element ref=\"failure\"/>\n                    <xs:element ref=\"rerunFailure\" minOccurs=\"0\" maxOccurs=\"unbounded\"/>\n                    <xs:element ref=\"rerunError\" minOccurs=\"0\" maxOccurs=\"unbounded\"/>\n                    <xs:element ref=\"flakyFailure\" minOccurs=\"0\" maxOccurs=\"unbounded\"/>\n                    <xs:element ref=\"flakyError\" minOccurs=\"0\" maxOccurs=\"unbounded\"/>\n                    <xs:element ref=\"system-out\"/>\n                    <xs:element ref=\"system-err\"/>\n                </xs:choice>\n            </xs:sequence>\n            <xs:attribute name=\"name\" type=\"xs:string\" use=\"required\"/>\n            <xs:attribute name=\"time\" type=\"xs:string\"/>\n            <xs:attribute name=\"classname\" type=\"xs:string\"/>\n            <xs:attribute name=\"group\" type=\"xs:string\"/>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"testsuite\">\n        <xs:complexType>\n            <xs:choice minOccurs=\"0\" maxOccurs=\"unbounded\">\n                <xs:element ref=\"testsuite\"/>\n                <xs:element ref=\"properties\"/>\n                <xs:element ref=\"testcase\"/>\n                <xs:element ref=\"system-out\"/>\n                <xs:element ref=\"system-err\"/>\n            </xs:choice>\n            <xs:attribute name=\"name\" type=\"xs:string\" use=\"required\"/>\n            <xs:attribute name=\"tests\" type=\"xs:string\" use=\"required\"/>\n            <xs:attribute name=\"failures\" type=\"xs:string\" use=\"required\"/>\n            <xs:attribute name=\"errors\" type=\"xs:string\" use=\"required\"/>\n            <xs:attribute name=\"group\" type=\"xs:string\" />\n            <xs:attribute name=\"time\" type=\"SUREFIRE_TIME\"/>\n            <xs:attribute name=\"skipped\" type=\"xs:string\" />\n            <xs:attribute name=\"timestamp\" type=\"xs:string\" />\n            <xs:attribute name=\"hostname\" type=\"xs:string\" 
/>\n            <xs:attribute name=\"id\" type=\"xs:string\" />\n            <xs:attribute name=\"package\" type=\"xs:string\" />\n            <xs:attribute name=\"file\" type=\"xs:string\"/>\n            <xs:attribute name=\"log\" type=\"xs:string\"/>\n            <xs:attribute name=\"url\" type=\"xs:string\"/>\n            <xs:attribute name=\"version\" type=\"xs:string\"/>\n        </xs:complexType>\n    </xs:element>\n\n    <xs:element name=\"testsuites\">\n        <xs:complexType>\n            <xs:sequence>\n                <xs:element ref=\"testsuite\" minOccurs=\"0\" maxOccurs=\"unbounded\" />\n            </xs:sequence>\n            <xs:attribute name=\"name\" type=\"xs:string\" />\n            <xs:attribute name=\"time\" type=\"SUREFIRE_TIME\"/>\n            <xs:attribute name=\"tests\" type=\"xs:string\" />\n            <xs:attribute name=\"failures\" type=\"xs:string\" />\n            <xs:attribute name=\"errors\" type=\"xs:string\" />\n        </xs:complexType>\n    </xs:element>\n\n</xs:schema>\n"
  },
  {
    "path": "tox.ini",
    "content": "[pytest]\npython_files = *_test.py test*.py\ntestpaths = tests\nnorecursedirs = tests/django_example\n\n[tox]\nenvlist = begin,py{py3,310,311,312,313,314},pytest,py314-django{lts,curr},end,quality\n\n[gh-actions]\npython =\n    3.10: py310\n    3.11: py311\n    3.12: py312\n    3.13: py313\n    3.14: begin,py314,py314-django{tls,curr},end,quality\n\n[testenv]\ndeps =\n    coverage\n    codecov>=1.4.0\n    coveralls\n    djangolts: django~=4.2.0\n    djangocurr: django~=5.2.0\n    pytest\n    lxml>=3.6.0\ncommands =\n    python -m coverage run --append -m pytest\n    python -m coverage report --omit='.tox/*'\n    python -m xmlrunner discover -p \"discovery_test.py\"\npassenv =\n    CI\n    TRAVIS\n    TRAVIS_*\n    CODECOV_TOKEN\n    COVERALLS_REPO_TOKEN\n    COVERALLS_*\n    GITHUB_ACTION\n    GITHUB_HEAD_REF\n    GITHUB_REF\n    GITHUB_REPOSITORY\n    GITHUB_RUN_ID\n    GITHUB_SHA\n    GITHUB_TOKEN\n\n[testenv:uploadcodecov]\ncommands =\n    codecov -e TOXENV\n\n[testenv:uploadcoveralls]\ncommands =\n    -coveralls --service=github\n\n[testenv:finishcoveralls]\ncommands =\n    -coveralls --service=github --finish\n\n[testenv:pytest]\ncommands = pytest\n\n[testenv:begin]\ncommands = coverage erase\n\n[testenv:end]\ncommands =\n    coverage report\n    coverage html\n\n[testenv:quality]\nignore_outcome = True\ndeps =\n    mccabe\n    pylint\n    flake8\n    pyroma\n    pep257\ncommands =\n    pylint xmlrunner tests\n    flake8 --max-complexity 10\n    pyroma .\n    pep257\n"
  },
  {
    "path": "xmlrunner/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThis module provides the XMLTestRunner class, which is heavily based on the\ndefault TextTestRunner.\n\"\"\"\n\n# Allow version to be detected at runtime.\ntry:\n    from .version import __version__\nexcept ImportError:\n    __version__ = \"unknown\"\n\nfrom .runner import XMLTestRunner\n\n__all__ = ('__version__', 'XMLTestRunner')\n"
  },
  {
    "path": "xmlrunner/__main__.py",
    "content": "\"\"\"Main entry point\"\"\"\n\nimport sys\nfrom .runner import XMLTestProgram\n\nif sys.argv[0].endswith(\"__main__.py\"):\n    import os.path\n    # We change sys.argv[0] to make help message more useful\n    # use executable without path, unquoted\n    # (it's just a hint anyway)\n    # (if you have spaces in your executable you get what you deserve!)\n    executable = os.path.basename(sys.executable)\n    sys.argv[0] = executable + \" -m xmlrunner\"\n    del os\n\n__unittest = True\n\n\nXMLTestProgram(module=None)\n"
  },
  {
    "path": "xmlrunner/builder.py",
    "content": "import re\nimport sys\nimport datetime\nimport time\n\nfrom xml.dom.minidom import Document\n\n\n__all__ = ('TestXMLBuilder', 'TestXMLContext')\n\n\n# see issue #74, the encoding name needs to be one of\n# http://www.iana.org/assignments/character-sets/character-sets.xhtml\nUTF8 = 'UTF-8'\n\n# Workaround for Python bug #5166\n# http://bugs.python.org/issue5166\n\n_char_tail = ''\n\nif sys.maxunicode > 0x10000:\n    _char_tail = (u'%s-%s') % (\n        chr(0x10000),\n        chr(min(sys.maxunicode, 0x10FFFF))\n    )\n\n_nontext_sub = re.compile(\n    r'[^\\x09\\x0A\\x0D\\x20-\\uD7FF\\uE000-\\uFFFD%s]' % _char_tail,\n    re.U\n).sub\n\n\ndef replace_nontext(text, replacement=u'\\uFFFD'):\n    return _nontext_sub(replacement, text)\n\n\nclass TestXMLContext(object):\n    \"\"\"A XML report file have a distinct hierarchy. The outermost element is\n    'testsuites', which contains one or more 'testsuite' elements. The role of\n    these elements is to give the proper context to 'testcase' elements.\n\n    These contexts have a few things in common: they all have some sort of\n    counters (i.e. 
how many testcases are inside that context, how many failed,\n    and so on), they all have a 'time' attribute indicating how long it took\n    for their testcases to run, etc.\n\n    The purpose of this class is to abstract the job of composing this\n    hierarchy while keeping track of counters and how long it took for a\n    context to be processed.\n    \"\"\"\n\n    # Allowed keys for self.counters\n    _allowed_counters = ('tests', 'errors', 'failures', 'skipped',)\n\n    def __init__(self, xml_doc, parent_context=None):\n        \"\"\"Creates a new instance of a root or nested context (depending whether\n        `parent_context` is provided or not).\n        \"\"\"\n        self.xml_doc = xml_doc\n        self.parent = parent_context\n        self._start_time_m = 0\n        self._stop_time_m = 0\n        self._stop_time = 0\n        self.counters = {}\n\n    def element_tag(self):\n        \"\"\"Returns the name of the tag represented by this context.\n        \"\"\"\n        return self.element.tagName\n\n    def begin(self, tag, name):\n        \"\"\"Begins the creation of this context in the XML document by creating\n        an empty tag <tag name='param'>.\n        \"\"\"\n        self.element = self.xml_doc.createElement(tag)\n        self.element.setAttribute('name', replace_nontext(name))\n        self._start_time = time.monotonic()\n\n    def end(self):\n        \"\"\"Closes this context (started with a call to `begin`) and creates an\n        attribute for each counter and another for the elapsed time.\n        \"\"\"\n        # time.monotonic is reliable for measuring differences, not affected by NTP\n        self._stop_time_m = time.monotonic()\n        # time.time is used for reference point\n        self._stop_time = time.time()\n        self.element.setAttribute('time', self.elapsed_time())\n        self.element.setAttribute('timestamp', self.timestamp())\n        self._set_result_counters()\n        return self.element\n\n    def 
_set_result_counters(self):\n        \"\"\"Sets an attribute in this context's tag for each counter considering\n        what's valid for each tag name.\n        \"\"\"\n        tag = self.element_tag()\n\n        for counter_name in TestXMLContext._allowed_counters:\n            valid_counter_for_element = False\n\n            if counter_name == 'skipped':\n                valid_counter_for_element = (\n                    tag == 'testsuite'\n                )\n            else:\n                valid_counter_for_element = (\n                    tag in ('testsuites', 'testsuite')\n                )\n\n            if valid_counter_for_element:\n                value = str(\n                    self.counters.get(counter_name, 0)\n                )\n                self.element.setAttribute(counter_name, value)\n\n    def increment_counter(self, counter_name):\n        \"\"\"Increments a counter named by `counter_name`, which can be any one\n        defined in `_allowed_counters`.\n        \"\"\"\n        if counter_name in TestXMLContext._allowed_counters:\n            self.counters[counter_name] = \\\n                self.counters.get(counter_name, 0) + 1\n\n    def elapsed_time(self):\n        \"\"\"Returns the time the context took to run between the calls to\n        `begin()` and `end()`, in seconds.\n        \"\"\"\n        return format(self._stop_time_m - self._start_time_m, '.3f')\n\n    def timestamp(self):\n        \"\"\"Returns the time the context ended as ISO-8601-formatted timestamp.\n        \"\"\"\n        return datetime.datetime.fromtimestamp(self._stop_time).replace(microsecond=0).isoformat()\n\n\nclass TestXMLBuilder(object):\n    \"\"\"This class encapsulates most rules needed to create a XML test report\n    behind a simple interface.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Creates a new instance.\n        \"\"\"\n        self._xml_doc = Document()\n        self._current_context = None\n\n    def current_context(self):\n        
\"\"\"Returns the current context.\n        \"\"\"\n        return self._current_context\n\n    def begin_context(self, tag, name):\n        \"\"\"Begins a new context in the XML test report, which usually is defined\n        by one on the tags 'testsuites', 'testsuite', or 'testcase'.\n        \"\"\"\n        context = TestXMLContext(self._xml_doc, self._current_context)\n        context.begin(tag, name)\n\n        self._current_context = context\n\n    def context_tag(self):\n        \"\"\"Returns the tag represented by the current context.\n        \"\"\"\n        return self._current_context.element_tag()\n\n    def _create_cdata_section(self, content):\n        \"\"\"Returns a new CDATA section containing the string defined in\n        `content`.\n        \"\"\"\n        filtered_content = replace_nontext(content)\n        return self._xml_doc.createCDATASection(filtered_content)\n\n    def append_cdata_section(self, tag, content):\n        \"\"\"Appends a tag in the format <tag>CDATA</tag> into the tag represented\n        by the current context. Returns the created tag.\n        \"\"\"\n        element = self._xml_doc.createElement(tag)\n\n        pos = content.find(']]>')\n        while pos >= 0:\n            tmp = content[0:pos+2]\n            element.appendChild(self._create_cdata_section(tmp))\n            content = content[pos+2:]\n            pos = content.find(']]>')\n\n        element.appendChild(self._create_cdata_section(content))\n\n        self._append_child(element)\n        return element\n\n    def append(self, tag, content, **kwargs):\n        \"\"\"Apends a tag in the format <tag attr='val' attr2='val2'>CDATA</tag>\n        into the tag represented by the current context. 
Returns the created\n        tag.\n        \"\"\"\n        element = self._xml_doc.createElement(tag)\n\n        for key, value in kwargs.items():\n            filtered_value = replace_nontext(str(value))\n            element.setAttribute(key, filtered_value)\n\n        if content:\n            element.appendChild(self._create_cdata_section(content))\n\n        self._append_child(element)\n        return element\n\n    def _append_child(self, element):\n        \"\"\"Appends a tag object represented by `element` into the tag\n        represented by the current context.\n        \"\"\"\n        if self._current_context:\n            self._current_context.element.appendChild(element)\n        else:\n            self._xml_doc.appendChild(element)\n\n    def increment_counter(self, counter_name):\n        \"\"\"Increments a counter in the current context and their parents.\n        \"\"\"\n        context = self._current_context\n\n        while context:\n            context.increment_counter(counter_name)\n            context = context.parent\n\n    def end_context(self):\n        \"\"\"Ends the current context and sets the current context as being the\n        previous one (if it exists). Also, when a context ends, its tag is\n        appended in the proper place inside the document.\n        \"\"\"\n        if not self._current_context:\n            return False\n\n        element = self._current_context.end()\n\n        self._current_context = self._current_context.parent\n        self._append_child(element)\n\n        return True\n\n    def finish(self):\n        \"\"\"Ends all open contexts and returns a pretty printed version of the\n        generated XML document.\n        \"\"\"\n        while self.end_context():\n            pass\n        return self._xml_doc.toprettyxml(indent='\\t', encoding=UTF8)\n"
  },
  {
    "path": "xmlrunner/extra/__init__.py",
    "content": ""
  },
  {
    "path": "xmlrunner/extra/djangotestrunner.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nCustom Django test runner that runs the tests using the\nXMLTestRunner class.\n\nThis script shows how to use the XMLTestRunner in a Django project. To learn\nhow to configure a custom TestRunner in a Django project, please read the\nDjango docs website.\n\"\"\"\n\nimport os\nimport xmlrunner\nimport os.path\nfrom django.conf import settings\nfrom django.test.runner import DiscoverRunner\n\n\nclass XMLTestRunner(DiscoverRunner):\n    test_runner = xmlrunner.XMLTestRunner\n\n    def get_resultclass(self):\n        # Django provides `DebugSQLTextTestResult` if `debug_sql` argument is True\n        # To use `xmlrunner.result._XMLTestResult` we supress default behavior\n        return None\n\n    def get_test_runner_kwargs(self):\n        # We use separate verbosity setting for our runner\n        verbosity = getattr(settings, 'TEST_OUTPUT_VERBOSE', 1)\n        if isinstance(verbosity, bool):\n            verbosity = (1, 2)[verbosity]\n        verbosity = verbosity  # not self.verbosity\n\n        output_dir = getattr(settings, 'TEST_OUTPUT_DIR', '.')\n        single_file = getattr(settings, 'TEST_OUTPUT_FILE_NAME', None)\n\n        # For single file case we are able to create file here\n        # But for multiple files case files will be created inside runner/results\n        if single_file is None:  # output will be a path (folder)\n            output = output_dir\n        else:  # output will be a stream\n            if not os.path.exists(output_dir):\n                os.makedirs(output_dir)\n            file_path = os.path.join(output_dir, single_file)\n            output = open(file_path, 'wb')\n\n        return dict(\n            verbosity=verbosity,\n            descriptions=getattr(settings, 'TEST_OUTPUT_DESCRIPTIONS', False),\n            failfast=self.failfast,\n            resultclass=self.get_resultclass(),\n            output=output,\n        )\n\n    def run_suite(self, suite, **kwargs):\n        
runner_kwargs = self.get_test_runner_kwargs()\n        runner = self.test_runner(**runner_kwargs)\n        results = runner.run(suite)\n        if hasattr(runner_kwargs['output'], 'close'):\n            runner_kwargs['output'].close()\n        return results\n"
  },
  {
    "path": "xmlrunner/extra/xunit_plugin.py",
    "content": "import io\nimport lxml.etree as etree\n\n\nTRANSFORM = etree.XSLT(etree.XML(b'''\\\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<xsl:stylesheet xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\" version=\"2.0\">\n    <xsl:output method=\"xml\" indent=\"yes\" />\n\n    <!-- /dev/null for these attributes -->\n    <xsl:template match=\"//testcase/@file\" />\n    <xsl:template match=\"//testcase/@line\" />\n    <xsl:template match=\"//testcase/@timestamp\" />\n\n    <!-- copy the rest -->\n    <xsl:template match=\"node()|@*\">\n        <xsl:copy>\n            <xsl:apply-templates select=\"node()|@*\" />\n        </xsl:copy>\n    </xsl:template>\n</xsl:stylesheet>'''))\n\n\ndef transform(xml_data):\n    out = io.BytesIO()\n    xml_doc = etree.XML(xml_data)\n    result = TRANSFORM(xml_doc)\n    result.write(out)\n    return out.getvalue()\n"
  },
  {
    "path": "xmlrunner/result.py",
    "content": "\nimport inspect\nimport io\nimport os\nimport sys\nimport datetime\nimport traceback\nimport re\nfrom os import path\nfrom io import StringIO\n\n# use direct import to bypass freezegun\nfrom time import time\n\nfrom .unittest import TestResult, TextTestResult, failfast\n\n\n# Matches invalid XML1.0 unicode characters, like control characters:\n# http://www.w3.org/TR/2006/REC-xml-20060816/#charsets\n# http://stackoverflow.com/questions/1707890/fast-way-to-filter-illegal-xml-unicode-chars-in-python\n\n_illegal_unichrs = [\n    (0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x1F),\n    (0x7F, 0x84), (0x86, 0x9F),\n    (0xFDD0, 0xFDDF), (0xFFFE, 0xFFFF),\n]\nif sys.maxunicode >= 0x10000:  # not narrow build\n    _illegal_unichrs.extend([\n        (0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF),\n        (0x3FFFE, 0x3FFFF), (0x4FFFE, 0x4FFFF),\n        (0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),\n        (0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF),\n        (0x9FFFE, 0x9FFFF), (0xAFFFE, 0xAFFFF),\n        (0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),\n        (0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF),\n        (0xFFFFE, 0xFFFFF), (0x10FFFE, 0x10FFFF),\n    ])\n\n_illegal_ranges = [\n    \"%s-%s\" % (chr(low), chr(high))\n    for (low, high) in _illegal_unichrs\n]\n\nINVALID_XML_1_0_UNICODE_RE = re.compile(u'[%s]' % u''.join(_illegal_ranges))\n\n\nSTDOUT_LINE = '\\nStdout:\\n%s'\nSTDERR_LINE = '\\nStderr:\\n%s'\n\n\ndef safe_unicode(data, encoding='utf8'):\n    \"\"\"Return a unicode string containing only valid XML characters.\n\n    encoding - if data is a byte string it is first decoded to unicode\n        using this encoding.\n    \"\"\"\n    data = str(data)\n    return INVALID_XML_1_0_UNICODE_RE.sub('', data)\n\n\ndef testcase_name(test_method):\n    testcase = type(test_method)\n\n    # Ignore module name if it is '__main__'\n    module = testcase.__module__ + '.'\n    if module == '__main__.':\n        module = ''\n    result = module + testcase.__name__\n    return result\n\n\ndef 
resolve_filename(filename):\n    # Try to make filename relative to current directory.\n    try:\n        rel_filename = os.path.relpath(filename)\n    except ValueError:\n        return filename\n    # if not inside folder, keep as-is\n    return filename if rel_filename.startswith('../') else rel_filename\n\n\nclass _DuplicateWriter(io.TextIOBase):\n    \"\"\"\n    Duplicate output from the first handle to the second handle\n\n    The second handle is expected to be a StringIO and not to block.\n    \"\"\"\n\n    def __init__(self, first, second):\n        super(_DuplicateWriter, self).__init__()\n        self._first = first\n        self._second = second\n\n    def flush(self):\n        try:\n            self._first.flush()\n        except ValueError:\n            pass\n        try:\n            self._second.flush()\n        except ValueError:\n            pass\n\n    def writable(self):\n        return True\n\n    def getvalue(self):\n        return self._second.getvalue()\n\n    def writelines(self, lines):\n        self._first.writelines(lines)\n        self._second.writelines(lines)\n\n    def write(self, b):\n        if isinstance(self._first, io.TextIOBase):\n            wrote = self._first.write(b)\n\n            if wrote is not None:\n                # expected to always succeed to write\n                self._second.write(b[:wrote])\n\n            return wrote\n        else:\n            # file-like object that doesn't return wrote bytes.\n            self._first.write(b)\n            self._second.write(b)\n            return len(b)\n\n\nclass _TestInfo(object):\n    \"\"\"\n    This class keeps useful information about the execution of a\n    test method.\n    \"\"\"\n\n    # Possible test outcomes\n    (SUCCESS, FAILURE, ERROR, SKIP) = range(4)\n\n    OUTCOME_ELEMENTS = {\n        SUCCESS: None,\n        FAILURE: 'failure',\n        ERROR: 'error',\n        SKIP: 'skipped',\n    }\n\n    def __init__(self, test_result, test_method, outcome=SUCCESS, 
err=None, subTest=None, filename=None, lineno=None, doc=None):\n        self.test_result = test_result\n        self.outcome = outcome\n        self.elapsed_time = 0\n        self.timestamp = datetime.datetime.min.replace(microsecond=0).isoformat()\n        if err is not None:\n            if self.outcome != _TestInfo.SKIP:\n                self.test_exception_name = safe_unicode(err[0].__name__)\n                self.test_exception_message = safe_unicode(err[1])\n            else:\n                self.test_exception_message = safe_unicode(err)\n\n        self.stdout = test_result._stdout_data\n        self.stderr = test_result._stderr_data\n\n        self.test_description = self.test_result.getDescription(test_method)\n        self.test_exception_info = (\n            '' if outcome in (self.SUCCESS, self.SKIP)\n            else self.test_result._exc_info_to_string(\n                    err, test_method)\n        )\n\n        self.test_name = testcase_name(test_method)\n        self.test_id = test_method.id()\n\n        if subTest:\n            self.test_id = subTest.id()\n            self.test_description = self.test_result.getDescription(subTest)\n\n        self.filename = filename\n        self.lineno = lineno\n        self.doc = doc\n\n    def id(self):\n        return self.test_id\n\n    def test_finished(self):\n        \"\"\"Save info that can only be calculated once a test has run.\n        \"\"\"\n        self.elapsed_time = \\\n            self.test_result.stop_time - self.test_result.start_time\n        timestamp = datetime.datetime.fromtimestamp(self.test_result.stop_time)\n        self.timestamp = timestamp.replace(microsecond=0).isoformat()\n\n    def get_error_info(self):\n        \"\"\"\n        Return a text representation of an exception thrown by a test\n        method.\n        \"\"\"\n        return self.test_exception_info\n\n    def shortDescription(self):\n        return self.test_description\n\n\nclass _XMLTestResult(TextTestResult):\n    
\"\"\"\n    A test result class that can express test results in a XML report.\n\n    Used by XMLTestRunner.\n    \"\"\"\n    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,\n                 elapsed_times=True, properties=None, infoclass=None):\n        TextTestResult.__init__(self, stream, descriptions, verbosity)\n        self._stdout_data = None\n        self._stderr_data = None\n        self._stdout_capture = StringIO()\n        self.__stdout_saved = None\n        self._stderr_capture = StringIO()\n        self.__stderr_saved = None\n        self.successes = []\n        self.callback = None\n        self.elapsed_times = elapsed_times\n        self.properties = properties  # junit testsuite properties\n        self.filename = None\n        self.lineno = None\n        self.doc = None\n        if infoclass is None:\n            self.infoclass = _TestInfo\n        else:\n            self.infoclass = infoclass\n\n    def _prepare_callback(self, test_info, target_list, verbose_str,\n                          short_str):\n        \"\"\"\n        Appends a `infoclass` to the given target list and sets a callback\n        method to be called by stopTest method.\n        \"\"\"\n        test_info.filename = self.filename\n        test_info.lineno = self.lineno\n        test_info.doc = self.doc\n        target_list.append(test_info)\n\n        def callback():\n            \"\"\"Prints the test method outcome to the stream, as well as\n            the elapsed time.\n            \"\"\"\n\n            test_info.test_finished()\n\n            # Ignore the elapsed times for a more reliable unit testing\n            if not self.elapsed_times:\n                self.start_time = self.stop_time = 0\n\n            if self.showAll:\n                self.stream.writeln(\n                    '%s (%.3fs)' % (verbose_str, test_info.elapsed_time)\n                )\n            elif self.dots:\n                self.stream.write(short_str)\n\n            
self.stream.flush()\n\n        self.callback = callback\n\n    def startTest(self, test):\n        \"\"\"\n        Called before execute each test method.\n        \"\"\"\n        self.start_time = time()\n        TestResult.startTest(self, test)\n\n        try:\n            if getattr(test, '_dt_test', None) is not None:\n                # doctest.DocTestCase\n                self.filename = test._dt_test.filename\n                self.lineno = test._dt_test.lineno\n            else:\n                # regular unittest.TestCase?\n                test_method = getattr(test, test._testMethodName)\n                test_class = type(test)\n                # Note: inspect can get confused with decorators, so use class.\n                self.filename = inspect.getsourcefile(test_class)\n                # Handle partial and partialmethod objects.\n                test_method = getattr(test_method, 'func', test_method)\n                _, self.lineno = inspect.getsourcelines(test_method)\n\n                self.doc = test_method.__doc__\n        except (AttributeError, IOError, TypeError):\n            # issue #188, #189, #195\n            # some frameworks can make test method opaque.\n            pass\n\n        if self.showAll:\n            self.stream.write('  ' + self.getDescription(test))\n            self.stream.write(\" ... 
\")\n            self.stream.flush()\n\n    def _setupStdout(self):\n        \"\"\"\n        Capture stdout / stderr by replacing sys.stdout / sys.stderr\n        \"\"\"\n        super(_XMLTestResult, self)._setupStdout()\n        self.__stdout_saved = sys.stdout\n        sys.stdout = _DuplicateWriter(sys.stdout, self._stdout_capture)\n        self.__stderr_saved = sys.stderr\n        sys.stderr = _DuplicateWriter(sys.stderr, self._stderr_capture)\n\n    def _restoreStdout(self):\n        \"\"\"\n        Stop capturing stdout / stderr and recover sys.stdout / sys.stderr\n        \"\"\"\n        if self.__stdout_saved:\n            sys.stdout = self.__stdout_saved\n            self.__stdout_saved = None\n        if self.__stderr_saved:\n            sys.stderr = self.__stderr_saved\n            self.__stderr_saved = None\n        self._stdout_capture.seek(0)\n        self._stdout_capture.truncate()\n        self._stderr_capture.seek(0)\n        self._stderr_capture.truncate()\n        super(_XMLTestResult, self)._restoreStdout()\n\n    def _save_output_data(self):\n        self._stdout_data = self._stdout_capture.getvalue()\n        self._stderr_data = self._stderr_capture.getvalue()\n\n    def stopTest(self, test):\n        \"\"\"\n        Called after execute each test method.\n        \"\"\"\n        self._save_output_data()\n        # self._stdout_data = sys.stdout.getvalue()\n        # self._stderr_data = sys.stderr.getvalue()\n\n        TextTestResult.stopTest(self, test)\n        self.stop_time = time()\n\n        if self.callback and callable(self.callback):\n            self.callback()\n            self.callback = None\n\n    def addSuccess(self, test):\n        \"\"\"\n        Called when a test executes successfully.\n        \"\"\"\n        self._save_output_data()\n        self._prepare_callback(\n            self.infoclass(self, test), self.successes, 'ok', '.'\n        )\n\n    @failfast\n    def addFailure(self, test, err):\n        \"\"\"\n        
Called when a test method fails.\n        \"\"\"\n        self._save_output_data()\n        testinfo = self.infoclass(\n            self, test, self.infoclass.FAILURE, err)\n        self.failures.append((\n            testinfo,\n            self._exc_info_to_string(err, test)\n        ))\n        self._prepare_callback(testinfo, [], 'FAIL', 'F')\n\n    @failfast\n    def addError(self, test, err):\n        \"\"\"\n        Called when a test method raises an error.\n        \"\"\"\n        self._save_output_data()\n        testinfo = self.infoclass(\n            self, test, self.infoclass.ERROR, err)\n        self.errors.append((\n            testinfo,\n            self._exc_info_to_string(err, test)\n        ))\n        self._prepare_callback(testinfo, [], 'ERROR', 'E')\n\n    def addSubTest(self, testcase, test, err):\n        \"\"\"\n        Called when a subTest method raises an error.\n        \"\"\"\n        if err is not None:\n\n            errorText = None\n            errorValue = None\n            errorList = None\n            if issubclass(err[0], test.failureException):\n                errorText = 'FAIL'\n                errorValue = self.infoclass.FAILURE\n                errorList = self.failures\n\n            else:\n                errorText = 'ERROR'\n                errorValue = self.infoclass.ERROR\n                errorList = self.errors\n\n            self._save_output_data()\n\n            testinfo = self.infoclass(\n                self, testcase, errorValue, err, subTest=test)\n            errorList.append((\n                testinfo,\n                self._exc_info_to_string(err, testcase)\n            ))\n            self._prepare_callback(testinfo, [], errorText, errorText[0])\n\n    def addSkip(self, test, reason):\n        \"\"\"\n        Called when a test method was skipped.\n        \"\"\"\n        self._save_output_data()\n        testinfo = self.infoclass(\n            self, test, self.infoclass.SKIP, reason)\n        
testinfo.test_exception_name = 'skip'\n        testinfo.test_exception_message = reason\n        self.skipped.append((testinfo, reason))\n        self._prepare_callback(testinfo, [], 'skip', 's')\n\n    def addExpectedFailure(self, test, err):\n        \"\"\"\n        Missing in xmlrunner, copy-pasted from xmlrunner addError.\n        \"\"\"\n        self._save_output_data()\n\n        testinfo = self.infoclass(self, test, self.infoclass.SKIP, err)\n        testinfo.test_exception_name = 'XFAIL'\n        testinfo.test_exception_message = 'expected failure: {}'.format(testinfo.test_exception_message)\n\n        self.expectedFailures.append((testinfo, self._exc_info_to_string(err, test)))\n        self._prepare_callback(testinfo, [], 'expected failure', 'x')\n\n    @failfast\n    def addUnexpectedSuccess(self, test):\n        \"\"\"\n        Missing in xmlrunner, copy-pasted from xmlrunner addSuccess.\n        \"\"\"\n        self._save_output_data()\n\n        testinfo = self.infoclass(self, test)  # do not set outcome here because it will need exception\n        testinfo.outcome = self.infoclass.ERROR\n        # But since we want to have error outcome, we need to provide additional fields:\n        testinfo.test_exception_name = 'UnexpectedSuccess'\n        testinfo.test_exception_message = ('Unexpected success: This test was marked as expected failure but passed, '\n                                           'please review it')\n\n        self.unexpectedSuccesses.append((testinfo, 'unexpected success'))\n        self._prepare_callback(testinfo, [], 'unexpected success', 'u')\n\n    def printErrorList(self, flavour, errors):\n        \"\"\"\n        Writes information about the FAIL or ERROR to the stream.\n        \"\"\"\n        for test_info, dummy in errors:\n            self.stream.writeln(self.separator1)\n            self.stream.writeln(\n                '%s [%.3fs]: %s' % (flavour, test_info.elapsed_time,\n                                    
test_info.test_description)\n            )\n            self.stream.writeln(self.separator2)\n            self.stream.writeln('%s' % test_info.get_error_info())\n            self.stream.flush()\n\n    def _get_info_by_testcase(self):\n        \"\"\"\n        Organizes test results by TestCase module. This information is\n        used during the report generation, where a XML report will be created\n        for each TestCase.\n        \"\"\"\n        tests_by_testcase = {}\n\n        for tests in (self.successes, self.failures, self.errors,\n                      self.skipped, self.expectedFailures, self.unexpectedSuccesses):\n            for test_info in tests:\n                if isinstance(test_info, tuple):\n                    # This is a skipped, error or a failure test case\n                    test_info = test_info[0]\n                testcase_name = test_info.test_name\n                if testcase_name not in tests_by_testcase:\n                    tests_by_testcase[testcase_name] = []\n                tests_by_testcase[testcase_name].append(test_info)\n\n        return tests_by_testcase\n\n    def _report_testsuite_properties(xml_testsuite, xml_document, properties):\n        if properties:\n            xml_properties = xml_document.createElement('properties')\n            xml_testsuite.appendChild(xml_properties)\n            for key, value in properties.items():\n                prop = xml_document.createElement('property')\n                prop.setAttribute('name', str(key))\n                prop.setAttribute('value', str(value))\n                xml_properties.appendChild(prop)\n\n    _report_testsuite_properties = staticmethod(_report_testsuite_properties)\n\n    def _report_testsuite(suite_name, tests, xml_document, parentElement,\n                          properties):\n        \"\"\"\n        Appends the testsuite section to the XML document.\n        \"\"\"\n        testsuite = xml_document.createElement('testsuite')\n        
parentElement.appendChild(testsuite)\n        module_name = suite_name.rpartition('.')[0]\n        file_name = module_name.replace('.', '/') + '.py'\n\n        testsuite.setAttribute('name', suite_name)\n        testsuite.setAttribute('tests', str(len(tests)))\n        testsuite.setAttribute('file', file_name)\n\n        testsuite.setAttribute(\n            'time', '%.3f' % sum(map(lambda e: e.elapsed_time, tests))\n        )\n        if tests:\n            testsuite.setAttribute(\n                'timestamp', max(map(lambda e: e.timestamp, tests))\n            )\n        failures = filter(lambda e: e.outcome == e.FAILURE, tests)\n        testsuite.setAttribute('failures', str(len(list(failures))))\n\n        errors = filter(lambda e: e.outcome == e.ERROR, tests)\n        testsuite.setAttribute('errors', str(len(list(errors))))\n\n        skips = filter(lambda e: e.outcome == _TestInfo.SKIP, tests)\n        testsuite.setAttribute('skipped', str(len(list(skips))))\n\n        _XMLTestResult._report_testsuite_properties(\n            testsuite, xml_document, properties)\n\n        for test in tests:\n            _XMLTestResult._report_testcase(test, testsuite, xml_document)\n\n        return testsuite\n\n    _report_testsuite = staticmethod(_report_testsuite)\n\n    def _test_method_name(test_id):\n        \"\"\"\n        Returns the test method name.\n        \"\"\"\n        # Trick subtest referencing objects\n        subtest_parts = test_id.split(' ')\n        test_method_name = subtest_parts[0].split('.')[-1]\n        subtest_method_name = [test_method_name] + subtest_parts[1:]\n        return ' '.join(subtest_method_name)\n\n    _test_method_name = staticmethod(_test_method_name)\n\n    def _createCDATAsections(xmldoc, node, text):\n        text = safe_unicode(text)\n        pos = text.find(']]>')\n        while pos >= 0:\n            tmp = text[0:pos+2]\n            cdata = xmldoc.createCDATASection(tmp)\n            node.appendChild(cdata)\n            text = 
text[pos+2:]\n            pos = text.find(']]>')\n        cdata = xmldoc.createCDATASection(text)\n        node.appendChild(cdata)\n\n    _createCDATAsections = staticmethod(_createCDATAsections)\n\n    def _report_testcase(test_result, xml_testsuite, xml_document):\n        \"\"\"\n        Appends a testcase section to the XML document.\n        \"\"\"\n        testcase = xml_document.createElement('testcase')\n        xml_testsuite.appendChild(testcase)\n\n        class_name = re.sub(r'^__main__.', '', test_result.id())\n\n        # Trick subtest referencing objects\n        class_name = class_name.split(' ')[0].rpartition('.')[0]\n\n        testcase.setAttribute('classname', class_name)\n        testcase.setAttribute(\n            'name', _XMLTestResult._test_method_name(test_result.test_id)\n        )\n        testcase.setAttribute('time', '%.3f' % test_result.elapsed_time)\n        testcase.setAttribute('timestamp', test_result.timestamp)\n\n        if test_result.filename is not None:\n            # Try to make filename relative to current directory.\n            filename = resolve_filename(test_result.filename)\n            testcase.setAttribute('file', filename)\n\n        if test_result.lineno is not None:\n            testcase.setAttribute('line', str(test_result.lineno))\n\n        if test_result.doc is not None:\n            comment = str(test_result.doc)\n            # The use of '--' is forbidden in XML comments\n            comment = comment.replace('--', '&#45;&#45;')\n            testcase.appendChild(xml_document.createComment(safe_unicode(comment)))\n\n        result_elem_name = test_result.OUTCOME_ELEMENTS[test_result.outcome]\n\n        if result_elem_name is not None:\n            result_elem = xml_document.createElement(result_elem_name)\n            testcase.appendChild(result_elem)\n\n            result_elem.setAttribute(\n                'type',\n                test_result.test_exception_name\n            )\n            
result_elem.setAttribute(\n                'message',\n                test_result.test_exception_message\n            )\n            if test_result.get_error_info():\n                error_info = safe_unicode(test_result.get_error_info())\n                _XMLTestResult._createCDATAsections(\n                    xml_document, result_elem, error_info)\n\n        if test_result.stdout:\n            systemout = xml_document.createElement('system-out')\n            testcase.appendChild(systemout)\n            _XMLTestResult._createCDATAsections(\n                xml_document, systemout, test_result.stdout)\n\n        if test_result.stderr:\n            systemout = xml_document.createElement('system-err')\n            testcase.appendChild(systemout)\n            _XMLTestResult._createCDATAsections(\n                xml_document, systemout, test_result.stderr)\n\n    _report_testcase = staticmethod(_report_testcase)\n\n    def generate_reports(self, test_runner):\n        \"\"\"\n        Generates the XML reports to a given XMLTestRunner object.\n        \"\"\"\n        from xml.dom.minidom import Document\n        all_results = self._get_info_by_testcase()\n\n        outputHandledAsString = \\\n            isinstance(test_runner.output, str)\n\n        if (outputHandledAsString and not os.path.exists(test_runner.output)):\n            os.makedirs(test_runner.output)\n\n        if not outputHandledAsString:\n            doc = Document()\n            testsuite = doc.createElement('testsuites')\n            doc.appendChild(testsuite)\n            parentElement = testsuite\n\n        for suite, tests in all_results.items():\n            if outputHandledAsString:\n                doc = Document()\n                parentElement = doc\n\n            suite_name = suite\n            if test_runner.outsuffix:\n                # not checking with 'is not None', empty means no suffix.\n                suite_name = '%s-%s' % (suite, test_runner.outsuffix)\n\n            # Build the 
XML file\n            testsuite = _XMLTestResult._report_testsuite(\n                suite_name, tests, doc, parentElement, self.properties\n            )\n\n            if outputHandledAsString:\n                xml_content = doc.toprettyxml(\n                    indent='\\t',\n                    encoding=test_runner.encoding\n                )\n                filename = path.join(\n                    test_runner.output,\n                    'TEST-%s.xml' % suite_name)\n                with open(filename, 'wb') as report_file:\n                    report_file.write(xml_content)\n\n                if self.showAll:\n                    self.stream.writeln('Generated XML report: {}'.format(filename))\n\n        if not outputHandledAsString:\n            # Assume that test_runner.output is a stream\n            xml_content = doc.toprettyxml(\n                indent='\\t',\n                encoding=test_runner.encoding\n            )\n            test_runner.output.write(xml_content)\n\n    def _exc_info_to_string(self, err, test):\n        \"\"\"Converts a sys.exc_info()-style tuple of values into a string.\"\"\"\n        return super(_XMLTestResult, self)._exc_info_to_string(err, test)\n\n    def getDescription(self, test):\n        if isinstance(test, tuple):\n            test = test[0]\n        return super().getDescription(test)\n"
  },
  {
    "path": "xmlrunner/runner.py",
    "content": "\nimport argparse\nimport sys\nimport time\n\nfrom .unittest import TextTestRunner, TestProgram\nfrom .result import _XMLTestResult\n\n# see issue #74, the encoding name needs to be one of\n# http://www.iana.org/assignments/character-sets/character-sets.xhtml\nUTF8 = 'UTF-8'\n\n\nclass XMLTestRunner(TextTestRunner):\n    \"\"\"\n    A test runner class that outputs the results in JUnit like XML files.\n    \"\"\"\n\n    def __init__(self, output='.', outsuffix=None,\n                 elapsed_times=True, encoding=UTF8,\n                 resultclass=None,\n                 **kwargs):\n        super(XMLTestRunner, self).__init__(**kwargs)\n        self.output = output\n        self.encoding = encoding\n        # None means default timestamped suffix\n        # '' (empty) means no suffix\n        if outsuffix is None:\n            outsuffix = time.strftime(\"%Y%m%d%H%M%S\")\n        self.outsuffix = outsuffix\n        self.elapsed_times = elapsed_times\n        if resultclass is None:\n            self.resultclass = _XMLTestResult\n        else:\n            self.resultclass = resultclass\n\n    def _make_result(self):\n        \"\"\"\n        Creates a TestResult object which will be used to store\n        information about the executed tests.\n        \"\"\"\n        # override in subclasses if necessary.\n        return self.resultclass(\n            self.stream, self.descriptions, self.verbosity, self.elapsed_times\n        )\n\n    def run(self, test):\n        \"\"\"\n        Runs the given test case or test suite.\n        \"\"\"\n        try:\n            # Prepare the test execution\n            result = self._make_result()\n            result.failfast = self.failfast\n            result.buffer = self.buffer\n            if hasattr(test, 'properties'):\n                # junit testsuite properties\n                result.properties = test.properties\n\n            # Print a nice header\n            self.stream.writeln()\n            
self.stream.writeln('Running tests...')\n            self.stream.writeln(result.separator2)\n\n            # Execute tests\n            start_time = time.monotonic()\n            test(result)\n            stop_time = time.monotonic()\n            time_taken = stop_time - start_time\n\n            # Print results\n            result.printErrors()\n            self.stream.writeln(result.separator2)\n            run = result.testsRun\n            self.stream.writeln(\"Ran %d test%s in %.3fs\" % (\n                run, run != 1 and \"s\" or \"\", time_taken)\n            )\n            self.stream.writeln()\n\n            # other metrics\n            expectedFails = len(result.expectedFailures)\n            unexpectedSuccesses = len(result.unexpectedSuccesses)\n            skipped = len(result.skipped)\n\n            # Error traces\n            infos = []\n            if not result.wasSuccessful():\n                self.stream.write(\"FAILED\")\n                failed, errored = map(len, (result.failures, result.errors))\n                if failed:\n                    infos.append(\"failures={0}\".format(failed))\n                if errored:\n                    infos.append(\"errors={0}\".format(errored))\n            else:\n                self.stream.write(\"OK\")\n\n            if skipped:\n                infos.append(\"skipped={0}\".format(skipped))\n            if expectedFails:\n                infos.append(\"expected failures={0}\".format(expectedFails))\n            if unexpectedSuccesses:\n                infos.append(\"unexpected successes={0}\".format(\n                    unexpectedSuccesses))\n\n            if infos:\n                self.stream.writeln(\" ({0})\".format(\", \".join(infos)))\n            else:\n                self.stream.write(\"\\n\")\n\n            # Generate reports\n            self.stream.writeln()\n            self.stream.writeln('Generating XML reports...')\n            result.generate_reports(self)\n        finally:\n           
 pass\n\n        return result\n\n\nclass XMLTestProgram(TestProgram):\n\n    def __init__(self, *args, **kwargs):\n        kwargs.setdefault('testRunner', XMLTestRunner)\n        self.warnings = None  # python2 fix\n        self._parseKnownArgs(kwargs)\n        super(XMLTestProgram, self).__init__(*args, **kwargs)\n\n    def _parseKnownArgs(self, kwargs):\n        argv = kwargs.get('argv')\n        if argv is None:\n            argv = sys.argv\n\n        # python2 argparse fix\n        parser = argparse.ArgumentParser(prog='xmlrunner')\n        group = parser.add_mutually_exclusive_group()\n        group.add_argument(\n            '-o', '--output', metavar='DIR',\n            help='Directory for storing XML reports (\\'.\\' default)')\n        group.add_argument(\n            '--output-file', metavar='FILENAME',\n            help='Filename for storing XML report')\n        parser.add_argument(\n            '--outsuffix', metavar='STRING',\n            help='Output suffix (timestamp is default)')\n        namespace, argv = parser.parse_known_args(argv)\n        self.output = namespace.output\n        self.output_file = namespace.output_file\n        self.outsuffix = namespace.outsuffix\n        kwargs['argv'] = argv\n\n    def _initArgParsers(self):\n        # this code path is only called in python3 (optparse vs argparse)\n        super(XMLTestProgram, self)._initArgParsers()\n\n        for parser in (self._main_parser, self._discovery_parser):\n            group = parser.add_mutually_exclusive_group()\n            group.add_argument(\n                '-o', '--output', metavar='DIR', nargs=1,\n                help='Directory for storing XML reports (\\'.\\' default)')\n            group.add_argument(\n                '--output-file', metavar='FILENAME', nargs=1,\n                help='Filename for storing XML report')\n            group.add_argument(\n                '--outsuffix', metavar='STRING', nargs=1,\n                help='Output suffix (timestamp is 
default)')\n\n    def runTests(self):\n        kwargs = dict(\n            verbosity=self.verbosity,\n            failfast=self.failfast,\n            buffer=self.buffer,\n            warnings=self.warnings,\n        )\n        if sys.version_info[:2] > (3, 4):\n            kwargs.update(tb_locals=self.tb_locals)\n\n        output_file = None\n        try:\n            if self.output_file is not None:\n                output_file = open(self.output_file, 'wb')\n                kwargs.update(output=output_file)\n            elif self.output is not None:\n                kwargs.update(output=self.output)\n\n            if self.outsuffix is not None:\n                kwargs.update(outsuffix=self.outsuffix)\n\n            self.testRunner = self.testRunner(**kwargs)\n            super(XMLTestProgram, self).runTests()\n        finally:\n            if output_file is not None:\n                output_file.close()\n"
  },
  {
    "path": "xmlrunner/unittest.py",
    "content": "\nfrom __future__ import absolute_import\n\nimport sys\n# pylint: disable-msg=W0611\nimport unittest\nfrom unittest import TextTestRunner\nfrom unittest import TestResult, TextTestResult\nfrom unittest.result import failfast\nfrom unittest.main import TestProgram\n\n\n__all__ = (\n    'unittest', 'TextTestRunner', 'TestResult', 'TextTestResult',\n    'TestProgram', 'failfast')\n"
  }
]