[
  {
    "path": ".github/CODEOWNERS",
    "content": "* @Hi-king @yokomotod @hirosassa @mski-iksm @kitagry @ujiuji1259 @mamo3gr @hiro-o918\n"
  },
  {
    "path": ".github/workflows/format.yml",
    "content": "name: Lint\n\non:\n    push:\n        branches: [ master ]\n    pull_request:\n\n\njobs:\n  formatting-check:\n\n    name: Lint\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v6\n    - name: Set up the latest version of uv\n      uses: astral-sh/setup-uv@v7\n      with:\n        enable-cache: true\n    - name: Install dependencies\n      run: |\n        uv tool install --python-preference only-managed --python 3.13 tox --with tox-uv\n    - name: Run ruff and mypy\n      run: |\n        uvx --with tox-uv tox run -e ruff,mypy\n"
  },
  {
    "path": ".github/workflows/publish.yml",
    "content": "name: Publish\n\non:\n  push:\n    tags: '*'\n\njobs:\n  deploy:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v6\n    - name: Set up the latest version of uv\n      uses: astral-sh/setup-uv@v7\n      with:\n        enable-cache: true\n    - name: Build and publish\n      env:\n        UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}\n      run: |\n        uv build\n        uv publish\n"
  },
  {
    "path": ".github/workflows/test.yml",
    "content": "name: Test\n\non:\n  push:\n    branches: [ master ]\n  pull_request:\n\njobs:\n  tests:\n    runs-on: ${{ matrix.platform }}\n    strategy:\n      max-parallel: 7\n      matrix:\n        platform: [\"ubuntu-latest\"]\n        tox-env: [\"py310\", \"py311\", \"py312\", \"py313\", \"py314\"]\n        include:\n          - platform: macos-15\n            tox-env: \"py313\"\n          - platform: macos-latest\n            tox-env: \"py313\"\n    steps:\n    - uses: actions/checkout@v6\n    - name: Set up the latest version of uv\n      uses: astral-sh/setup-uv@v7\n      with:\n        enable-cache: true\n    - name: Install dependencies\n      run: |\n        uv tool install --python-preference only-managed --python 3.13 tox --with tox-uv\n    - name: Test with tox\n      run: uvx --with tox-uv tox run -e ${{ matrix.tox-env }}\n"
  },
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n\n# pycharm\n.idea\n\n# gokart\nresources\nexamples/resources\n\n# poetry\ndist\n\n# temporary data\ntemporary.zip"
  },
  {
    "path": ".readthedocs.yaml",
    "content": "# Read the Docs configuration file for Sphinx projects\n# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details\n\n# Required\nversion: 2\n\n# Set the OS, Python version and other tools you might need\nbuild:\n  os: ubuntu-24.04\n  tools:\n    python: \"3.12\"\n\n# Build from the docs/ directory with Sphinx\nsphinx:\n  configuration: docs/conf.py\n\n# Optional but recommended, declare the Python requirements required\n# to build your documentation\n# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html\npython:\n  install:\n    - requirements: docs/requirements.txt\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2018 M3, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# gokart\n\n<p align=\"center\">\n  <img src=\"https://raw.githubusercontent.com/m3dev/gokart/master/docs/gokart_logo_side_isolation.svg\" width=\"90%\">\n<p>\n\n[![Test](https://github.com/m3dev/gokart/workflows/Test/badge.svg)](https://github.com/m3dev/gokart/actions?query=workflow%3ATest)\n[![](https://readthedocs.org/projects/gokart/badge/?version=latest)](https://gokart.readthedocs.io/en/latest/)\n[![Python Versions](https://img.shields.io/pypi/pyversions/gokart.svg)](https://pypi.org/project/gokart/)\n[![](https://img.shields.io/pypi/v/gokart)](https://pypi.org/project/gokart/)\n![](https://img.shields.io/pypi/l/gokart)\n\nGokart solves reproducibility, task dependencies, constraints of good code, and ease of use for Machine Learning Pipeline.\n\n\n[Documentation](https://gokart.readthedocs.io/en/latest/) for the latest release is hosted on readthedocs.\n\n\n# About gokart\n\nHere are some good things about gokart.\n\n- The following meta data for each Task is stored separately in a `pkl` file with hash value\n    - task output data\n    - imported all module versions\n    - task processing time\n    - random seed in task\n    - displayed log\n    - all parameters set as class variables in the task\n- Automatically rerun the pipeline if parameters of Tasks are changed.\n- Support GCS and S3 as a data store for intermediate results of Tasks in the pipeline.\n- The above output is exchanged between tasks as an intermediate file, which is memory-friendly\n- `pandas.DataFrame` type and column checking during I/O\n- Directory structure of saved files is automatically determined from structure of script\n- Seeds for numpy and random are automatically fixed\n- Can code while adhering to [SOLID](https://en.wikipedia.org/wiki/SOLID) principles as much as possible\n- Tasks are locked via redis even if they run in parallel\n\n**All the functions above are created for constructing Machine Learning batches. 
Provides an excellent environment for reproducibility and team development.**\n\n\nHere are some non-goal / downside of the gokart.\n- Batch execution in parallel is supported, but parallel and concurrent execution of task in memory.\n- Gokart is focused on reproducibility. So, I/O and capacity of data storage can become a bottleneck.\n- No support for task visualize.\n- Gokart is not an experiment management tool. The management of the execution result is cut out as [Thunderbolt](https://github.com/m3dev/thunderbolt).\n- Gokart does not recommend writing pipelines in toml, yaml, json, and more. Gokart is preferring to write them in Python.\n\n# Getting Started\n\nWithin the activated Python environment, use the following command to install gokart.\n\n```\npip install gokart\n```\n\n\n# Quickstart\n\n## Minimal Example\n\nA minimal gokart tasks looks something like this:\n\n\n```python\nimport gokart\n\nclass Example(gokart.TaskOnKart):\n    def run(self):\n        self.dump('Hello, world!')\n\ntask = Example()\noutput = gokart.build(task)\nprint(output)\n```\n\n`gokart.build` return the result of dump by `gokart.TaskOnKart`. 
The example will output the following.\n\n\n```\nHello, world!\n```\n\n## Type-Safe Pipeline Example\n\nWe introduce type-annotations to make a gokart pipeline robust.\nPlease check the following example to see how to use type-annotations on gokart.\nBefore using this feature, ensure to enable [mypy plugin](https://gokart.readthedocs.io/en/latest/mypy_plugin.html) feature in your project.\n\n```python\nimport gokart\n\n# `gokart.TaskOnKart[str]` means that the task dumps `str`\nclass StrDumpTask(gokart.TaskOnKart[str]):\n    def run(self):\n        self.dump('Hello, world!')\n\n# `gokart.TaskOnKart[int]` means that the task dumps `int`\nclass OneDumpTask(gokart.TaskOnKart[int]):\n    def run(self):\n        self.dump(1)\n\n# `gokart.TaskOnKart[int]` means that the task dumps `int`\nclass TwoDumpTask(gokart.TaskOnKart[int]):\n    def run(self):\n        self.dump(2)\n\nclass AddTask(gokart.TaskOnKart[int]):\n    # `a` requires a task to dump `int`\n    a: gokart.TaskInstanceParameter[gokart.TaskOnKart[int]] = gokart.TaskInstanceParameter()\n    # `b` requires a task to dump `int`\n    b: gokart.TaskInstanceParameter[gokart.TaskOnKart[int]] = gokart.TaskInstanceParameter()\n\n    def requires(self):\n        return dict(a=self.a, b=self.b)\n\n    def run(self):\n        # loading by instance parameter,\n        # `a` and `b` are treated as `int`\n        # because they are declared as `gokart.TaskOnKart[int]`\n        a = self.load(self.a)\n        b = self.load(self.b)\n        self.dump(a + b)\n\n\nvalid_task = AddTask(a=OneDumpTask(), b=TwoDumpTask())\n# the next line will show type error by mypy\n# because `StrDumpTask` dumps `str` and `AddTask` requires `int`\ninvalid_task = AddTask(a=OneDumpTask(), b=StrDumpTask())\n```\n\nThis is an introduction to some of the gokart.\nThere are still more useful features.\n\nPlease See [Documentation](https://gokart.readthedocs.io/en/latest/) .\n\nHave a good gokart life.\n\n# Achievements\n\nGokart is a proven product.\n\n- 
It's actually been used by [m3.inc](https://corporate.m3.com/en) for over 3 years\n- Natural Language Processing Competition by [Nishika.inc](https://nishika.com) 2nd prize : [Solution Repository](https://github.com/vaaaaanquish/nishika_akutagawa_2nd_prize)\n\n\n# Thanks\n\ngokart is a wrapper for luigi. Thanks to luigi and dependent projects!\n\n- [luigi](https://github.com/spotify/luigi)\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# Minimal makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD   = sphinx-build\nSOURCEDIR     = .\nBUILDDIR      = _build\n\n# Put it first so that \"make\" without argument is like \"make help\".\nhelp:\n\t@$(SPHINXBUILD) -M help \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(O)\n\n.PHONY: help Makefile\n\n# Catch-all target: route all unknown targets to Sphinx using the new\n# \"make mode\" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).\n%: Makefile\n\t@$(SPHINXBUILD) -M $@ \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(O)"
  },
  {
    "path": "docs/conf.py",
    "content": "# https://github.com/sphinx-doc/sphinx/issues/6211\nimport luigi\n\nimport gokart\n\nluigi.task.Task.requires.__doc__ = gokart.task.TaskOnKart.requires.__doc__\nluigi.task.Task.output.__doc__ = gokart.task.TaskOnKart.output.__doc__\n\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('../gokart/'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'gokart'\ncopyright = '2019, Masahiro Nishiba'\nauthor = 'Masahiro Nishiba'\n\n# The short X.Y version\nversion = ''\n# The full version, including alpha/beta/rc tags\nrelease = ''\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself.  
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'gokartdoc'\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n    # The paper size ('letterpaper' or 'a4paper').\n    #\n    # 'papersize': 'letterpaper',\n    # The font size ('10pt', '11pt' or '12pt').\n    #\n    # 'pointsize': '10pt',\n    # Additional stuff for the LaTeX preamble.\n    #\n    # 'preamble': '',\n    # Latex figure (float) alignment\n    #\n    # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n#  author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n    (master_doc, 'gokart.tex', 'gokart Documentation', 'Masahiro Nishiba', 'manual'),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'gokart', 'gokart Documentation', [author], 1)]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n#  dir menu entry, description, category)\ntexinfo_documents = [\n    (master_doc, 'gokart', 'gokart Documentation', author, 'gokart', 'One line description of project.', 'Miscellaneous'),\n]\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\n\n# The unique identifier of the text. 
This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n"
  },
  {
    "path": "docs/efficient_run_on_multi_workers.rst",
    "content": "How to improve efficiency when running on multiple workers\n===========================================================\n\nIf multiple worker nodes are running similar gokart pipelines in parallel, it is possible that the exact same task may be executed by multiple workers.\n(For example, when training multiple machine learning models with different parameters, the feature creation task in the first stage is expected to be exactly the same.)\n\nIt is inefficient to execute the same task on each of multiple worker nodes, so we want to avoid this.\nHere we introduce `should_lock_run` feature to improve this inefficiency.\n\n\n\nSuppress run() of the same task with `should_lock_run`\n------------------------------------------------------\nWhen `gokart.TaskOnKart.should_lock_run` is set to True, the task will fail if the same task is run()-ing by another worker.\nBy failing the task, other tasks that can be executed at that time are given priority.\nAfter that, the failed task is automatically re-executed.\n\n.. code:: python\n\n    class SampleTask2(gokart.TaskOnKart):\n        should_lock_run = True\n\n\nAdditional Option\n------------------\n\nSkip completed tasks with `complete_check_at_run`\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nBy setting `gokart.TaskOnKart.complete_check_at_run` to True, the existence of the cache can be rechecked at run() time.\n\nDefault is True, but if the check takes too much time, you can set to False to inactivate the check.\n\n.. code:: python\n\n    class SampleTask1(gokart.TaskOnKart):\n        complete_check_at_run = False\n    \n"
  },
  {
    "path": "docs/for_pandas.rst",
    "content": "For Pandas\n==========\n\nGokart has several features for Pandas.\n\n\nPandas Type Config\n------------------\n\nPandas has a feature that converts the type of column(s) automatically. This feature sometimes cause wrong result. To avoid unintentional type conversion of pandas, we can specify a column name to check the type of Task input and output in gokart.\n\n\n.. code:: python\n\n    from typing import Any, Dict\n    import pandas as pd\n    import gokart\n\n\n    # Please define a class which inherits `gokart.PandasTypeConfig`.\n    class SamplePandasTypeConfig(gokart.PandasTypeConfig):\n\n        @classmethod\n        def type_dict(cls) -> Dict[str, Any]:\n            return {'int_column': int}\n\n\n    class SampleTask(gokart.TaskOnKart[pd.DataFrame]):\n\n        def run(self):\n            # [PandasTypeError] because expected type is `int`, but `str` is passed.\n            df = pd.DataFrame(dict(int_column=['a']))\n            self.dump(df)\n\nThis is useful when dataframe has nullable columns because pandas auto-conversion often fails in such case.\n\nEasy to Load DataFrame\n----------------------\n\nThe :func:`~gokart.task.TaskOnKart.load` method is used to load input ``pandas.DataFrame``.\n\n.. code:: python\n\n    def requires(self):\n        return MakeDataFrameTask()\n\n    def run(self):\n        df = self.load()\n\nPlease refer to :func:`~gokart.task.TaskOnKart.load`.\n\n\nFail on empty DataFrame\n-----------------------\n\nWhen the :attr:`~gokart.task.TaskOnKart.fail_on_empty_dump` parameter is true, the :func:`~gokart.task.TaskOnKart.dump()` method raises :class:`~gokart.errors.EmptyDumpError` on trying to dump empty ``pandas.DataFrame``.\n\n\n.. 
code:: python\n\n    import gokart\n\n\n    class EmptyTask(gokart.TaskOnKart):\n        def run(self):\n            df = pd.DataFrame()\n            self.dump(df)\n\n\n::\n\n    $ python main.py EmptyTask --fail-on-empty-dump true\n    # EmptyDumpError\n    $ python main.py EmptyTask\n    # Task will be ran and outputs an empty dataframe\n\n\nEmpty caches sometimes hide bugs and let us spend much time debugging. This feature notifies us some bugs (including wrong datasources) in the early stage.\n\nPlease refer to :attr:`~gokart.task.TaskOnKart.fail_on_empty_dump`.\n"
  },
  {
    "path": "docs/gokart.rst",
    "content": "gokart package\n==============\n\nSubmodules\n----------\n\ngokart.file\\_processor module\n-----------------------------\n\n.. automodule:: gokart.file_processor\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\ngokart.info module\n------------------\n\n.. automodule:: gokart.info\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\ngokart.parameter module\n-----------------------\n\n.. automodule:: gokart.parameter\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\ngokart.run module\n-----------------\n\n.. automodule:: gokart.run\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\ngokart.s3\\_config module\n------------------------\n\n.. automodule:: gokart.s3_config\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\ngokart.target module\n--------------------\n\n.. automodule:: gokart.target\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\ngokart.task module\n------------------\n\n.. automodule:: gokart.task\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\ngokart.workspace\\_management module\n-----------------------------------\n\n.. automodule:: gokart.workspace_management\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\ngokart.zip\\_client module\n-------------------------\n\n.. automodule:: gokart.zip_client\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: gokart\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/index.rst",
    "content": ".. gokart documentation master file, created by\n   sphinx-quickstart on Fri Jan 11 07:59:25 2019.\n   You can adapt this file completely to your liking, but it should at least\n   contain the root `toctree` directive.\n\nWelcome to gokart's documentation!\n==================================\n\nUseful links: `GitHub <https://github.com/m3dev/gokart>`_ | `cookiecutter gokart <https://github.com/m3dev/cookiecutter-gokart>`_\n\n`Gokart <https://github.com/m3dev/gokart>`_ is a wrapper of the data pipeline library `luigi <https://github.com/spotify/luigi>`_. Gokart solves \"**reproducibility**\", \"**task dependencies**\", \"**constraints of good code**\", and \"**ease of use**\" for Machine Learning Pipeline.\n\n\nGood thing about gokart\n-----------------------\n\nHere are some good things about gokart.\n\n- The following data for each Task is stored separately in a pkl file with hash value\n    - task output data\n    - imported all module versions\n    - task processing time\n    - random seed in task\n    - displayed log\n    - all parameters set as class variables in the task\n- If change parameter of Task, rerun spontaneously.\n    - The above file will be generated with a different hash value\n    - The hash value of dependent task will also change and both will be rerun\n- Support GCS or S3\n- The above output is exchanged between tasks as an intermediate file, which is memory-friendly\n- pandas.DataFrame type and column checking during I/O\n- Directory structure of saved files is automatically determined from structure of script\n- Seeds for numpy and random are automatically fixed\n- Can code while adhering to SOLID principles as much as possible\n- Tasks are locked via redis even if they run in parallel\n\n**These are all functions baptized for creating Machine Learning batches. Provides an excellent environment for reproducibility and team development.**\n\n\n\nGetting started\n-----------------\n\n.. 
toctree::\n   :maxdepth: 2\n\n   intro_to_gokart\n   tutorial\n\nUser Guide\n-----------------\n\n.. toctree::\n   :maxdepth: 2\n\n   task_on_kart\n   task_parameters\n   setting_task_parameters\n   task_settings\n   task_information\n   logging\n   slack_notification\n   using_task_task_conflict_prevention_lock\n   efficient_run_on_multi_workers\n   for_pandas\n   polars\n   mypy_plugin\n\nAPI References\n--------------\n.. toctree::\n   :maxdepth: 2\n\n   gokart\n\n\nIndices and tables\n-------------------\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n"
  },
  {
    "path": "docs/intro_to_gokart.rst",
    "content": "Intro To Gokart\n===============\n\n\nInstallation\n------------\n\nWithin the activated Python environment, use the following command to install gokart.\n\n.. code:: sh\n\n    pip install gokart\n\n\n\nQuickstart\n----------\n\nA minimal gokart tasks looks something like this:\n\n\n.. code:: python\n\n    import gokart\n\n    class Example(gokart.TaskOnKart[str]):\n        def run(self):\n            self.dump('Hello, world!')\n\n    task = Example()\n    output = gokart.build(task)\n    print(output)\n\n\n``gokart.build`` return the result of dump by ``gokart.TaskOnKart``. The example will output the following.\n\n\n.. code:: sh\n\n    Hello, world!\n\n\n``gokart`` records all the information needed for Machine Learning. By default, ``resources`` will be generated in the same directory as the script.\n\n.. code:: sh\n\n    $ tree resources/\n    resources/\n    ├── __main__\n    │   └── Example_8441c59b5ce0113396d53509f19371fb.pkl\n    └── log\n        ├── module_versions\n        │   └── Example_8441c59b5ce0113396d53509f19371fb.txt\n        ├── processing_time\n        │   └── Example_8441c59b5ce0113396d53509f19371fb.pkl\n        ├── random_seed\n        │   └── Example_8441c59b5ce0113396d53509f19371fb.pkl\n        ├── task_log\n        │   └── Example_8441c59b5ce0113396d53509f19371fb.pkl\n        └── task_params\n            └── Example_8441c59b5ce0113396d53509f19371fb.pkl\n\n\nThe result of dumping the task will be saved in the ``__name__`` directory.\n\n\n.. code:: python\n\n    import pickle\n\n    with open('resources/__main__/Example_8441c59b5ce0113396d53509f19371fb.pkl', 'rb') as f:\n        print(pickle.load(f))  # Hello, world!\n\n\nThat will be given hash value depending on the parameter of the task. This means that if you change the parameter of the task, the hash value will change, and change output file. This is very useful when changing parameters and experimenting. Please refer to :doc:`task_parameters` section for task parameters. 
Also see :doc:`task_on_kart` section for information on how to return this output destination.\n\n\nIn addition, the following files are automatically saved as ``log``.\n\n- ``module_versions``: The versions of all modules that were imported when the script was executed. For reproducibility.\n- ``processing_time``: The execution time of the task.\n- ``random_seed``: This is random seed of python and numpy. For reproducibility in Machine Learning. Please refer to :doc:`task_settings` section.\n- ``task_log``: This is the output of the task logger.\n- ``task_params``: This is task's parameters. Please refer to :doc:`task_parameters` section.\n\n\nHow to running task\n-------------------\n\nGokart has ``run`` and ``build`` methods for running task. Each has a different purpose.\n\n- ``gokart.run``: uses arguments on the shell. return retcode.\n- ``gokart.build``: uses inline code on jupyter notebook, IPython, and more. return task output.\n\n\n.. note::\n\n    It is not recommended to use ``gokart.run`` and ``gokart.build`` together in the same script. Because ``gokart.build`` will clear the contents of ``luigi.register``. It's the only way to handle duplicate tasks.\n\n\ngokart.run\n~~~~~~~~~~\n\nThe :func:`~gokart.run` is running on shell.\n\n.. code:: python\n\n    import gokart\n    import luigi\n\n    class SampleTask(gokart.TaskOnKart[str]):\n        param = luigi.Parameter()\n\n        def run(self):\n            self.dump(self.param)\n\n    gokart.run()\n\n\n.. code:: sh\n\n    python sample.py SampleTask --local-scheduler --param=hello\n\n\nIf you were to write it in Python, it would be the same as the following behavior.\n\n\n.. code:: python\n\n    gokart.run(['SampleTask', '--local-scheduler', '--param=hello'])\n\n\ngokart.build\n~~~~~~~~~~~~\n\nThe :func:`~gokart.build` is inline code.\n\n.. 
code:: python\n\n    import gokart\n    import luigi\n\n    class SampleTask(gokart.TaskOnKart[str]):\n        param: luigi.Parameter = luigi.Parameter()\n\n        def run(self):\n            self.dump(self.param)\n\n    gokart.build(SampleTask(param='hello'), return_value=False)\n\n\nTo output logs of each tasks, you can pass `~log_level` parameter to `~gokart.build` as follows:\n\n.. code:: python\n\n    gokart.build(SampleTask(param='hello'), return_value=False, log_level=logging.DEBUG)\n\n\nThis feature is very useful for running `~gokart` on jupyter notebook.\nWhen some tasks are failed, gokart.build raises GokartBuildError. If you have to get tracebacks, you should set `log_level` as `logging.DEBUG`.\n"
  },
  {
    "path": "docs/logging.rst",
    "content": "Logging\n=======\n\nHow to set up a common logger for gokart.\n\n\nCore settings\n-------------\n\nPlease write a configuration file similar to the following:\n\n::\n\n    # base.ini\n    [core]\n    logging_conf_file=./conf/logging.ini\n\n.. code:: python\n\n    import gokart\n    gokart.add_config('base.ini')\n\n\nLogger ini file\n---------------\n\nIt is the same as a general logging.ini file.\n\n::\n\n    [loggers]\n    keys=root,luigi,luigi-interface,gokart,gokart.file_processor\n\n    [handlers]\n    keys=stderrHandler\n\n    [formatters]\n    keys=simpleFormatter\n\n    [logger_root]\n    level=INFO\n    handlers=stderrHandler\n\n    [logger_gokart]\n    level=INFO\n    handlers=stderrHandler\n    qualname=gokart\n    propagate=0\n\n    [logger_luigi]\n    level=INFO\n    handlers=stderrHandler\n    qualname=luigi\n    propagate=0\n\n    [logger_luigi-interface]\n    level=INFO\n    handlers=stderrHandler\n    qualname=luigi-interface\n    propagate=0\n\n    [logger_gokart.file_processor]\n    level=CRITICAL\n    handlers=stderrHandler\n    qualname=gokart.file_processor\n\n    [handler_stderrHandler]\n    class=StreamHandler\n    formatter=simpleFormatter\n    args=(sys.stdout,)\n\n    [formatter_simpleFormatter]\n    format=[%(asctime)s][%(name)s][%(levelname)s](%(filename)s:%(lineno)s) %(message)s\n    datefmt=%Y/%m/%d %H:%M:%S\n\nPlease refer to `Python logging documentation <https://docs.python.org/3/library/logging.config.html>`_\n"
  },
  {
    "path": "docs/make.bat",
    "content": "@ECHO OFF\r\n\r\npushd %~dp0\r\n\r\nREM Command file for Sphinx documentation\r\n\r\nif \"%SPHINXBUILD%\" == \"\" (\r\n\tset SPHINXBUILD=sphinx-build\r\n)\r\nset SOURCEDIR=.\r\nset BUILDDIR=_build\r\n\r\nif \"%1\" == \"\" goto help\r\n\r\n%SPHINXBUILD% >NUL 2>NUL\r\nif errorlevel 9009 (\r\n\techo.\r\n\techo.The 'sphinx-build' command was not found. Make sure you have Sphinx\r\n\techo.installed, then set the SPHINXBUILD environment variable to point\r\n\techo.to the full path of the 'sphinx-build' executable. Alternatively you\r\n\techo.may add the Sphinx directory to PATH.\r\n\techo.\r\n\techo.If you don't have Sphinx installed, grab it from\r\n\techo.http://sphinx-doc.org/\r\n\texit /b 1\r\n)\r\n\r\n%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%\r\ngoto end\r\n\r\n:help\r\n%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%\r\n\r\n:end\r\npopd\r\n"
  },
  {
    "path": "docs/mypy_plugin.rst",
    "content": "[Experimental] Mypy plugin\n===========================\n\nMypy plugin provides type checking for gokart tasks using Mypy.\nThis feature is experimental.\n\nHow to use\n--------------\n\nConfigure Mypy to use this plugin by adding the following to your ``mypy.ini`` file:\n\n.. code:: ini\n\n    [mypy]\n    plugins = gokart.mypy:plugin\n\nor by adding the following to your ``pyproject.toml`` file:\n\n.. code:: toml\n\n    [tool.mypy]\n    plugins = [\"gokart.mypy\"]\n\nThen, run Mypy as usual.\n\nExamples\n--------\n\nFor example the following code linted by Mypy:\n\n.. code:: python\n\n    import gokart\n    import luigi\n\n\n    class Foo(gokart.TaskOnKart):\n        # NOTE: must all the parameters be annotated\n        foo: int = luigi.IntParameter(default=1)\n        bar: str = luigi.Parameter()\n\n\n\n    Foo(foo=1, bar='2')   # OK\n    Foo(foo='1') # NG because foo is not int and bar is missing\n\n\nMypy plugin checks TaskOnKart generic types.\n\n.. code:: python\n\n    class SampleTask(gokart.TaskOnKart):\n        str_task: gokart.TaskOnKart[str] = gokart.TaskInstanceParameter()\n        int_task: gokart.TaskOnKart[int] = gokart.TaskInstanceParameter()\n\n        def requires(self):\n            return dict(str=self.str_task, int=self.int_task)\n\n        def run(self):\n            s = self.load(self.str_task)  # This type is inferred with \"str\"\n            i = self.load(self.int_task)  # This type is inferred with \"int\"\n\n    SampleTask(\n        str_task=StrTask(),  # mypy ok\n        int_task=StrTask(),  # mypy error: Argument \"int_task\" to \"StrTask\" has incompatible type \"StrTask\"; expected \"TaskOnKart[int]\n    )\n\nConfigurations (only pyproject.toml)\n-----------------------------------\n\nYou can configure the Mypy plugin using the ``pyproject.toml`` file.\nThe following options are available:\n\n.. 
code:: toml\n\n    [tool.gokart-mypy]\n    # If true, Mypy will raise an error if a task is missing required parameters.\n    # This configuration causes an error when the parameters set by `luigi.Config()`\n    # Default: false\n    disallow_missing_parameters = true\n"
  },
  {
    "path": "docs/polars.rst",
    "content": "Polars Support\n==============\n\nGokart supports Polars DataFrames alongside pandas DataFrames for DataFrame-based file processors. This allows gradual migration from pandas to Polars or using both libraries simultaneously in your data pipelines.\n\n\nInstallation\n------------\n\nPolars support is optional. Install it with:\n\n.. code:: bash\n\n    pip install gokart[polars]\n\nOr install Polars separately:\n\n.. code:: bash\n\n    pip install polars\n\n\nBasic Usage\n-----------\n\nTo use Polars DataFrames with gokart, specify ``dataframe_type='polars'`` when creating file processors:\n\n.. code:: python\n\n    import polars as pl\n    from gokart import TaskOnKart\n    from gokart.file_processor import FeatherFileProcessor\n\n    class MyPolarsTask(TaskOnKart[pl.DataFrame]):\n        def output(self):\n            return self.make_target(\n                'path/to/target.feather',\n                processor=FeatherFileProcessor(\n                    store_index_in_feather=False,\n                    dataframe_type='polars'\n                )\n            )\n\n        def run(self):\n            df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n            self.dump(df)\n\n\nSupported File Processors\n--------------------------\n\nThe following file processors support the ``dataframe_type`` parameter:\n\nCsvFileProcessor\n^^^^^^^^^^^^^^^^\n\n.. code:: python\n\n    from gokart.file_processor import CsvFileProcessor\n\n    # For Polars\n    processor = CsvFileProcessor(sep=',', encoding='utf-8', dataframe_type='polars')\n\n    # For pandas (default)\n    processor = CsvFileProcessor(sep=',', encoding='utf-8', dataframe_type='pandas')\n    # or simply\n    processor = CsvFileProcessor(sep=',', encoding='utf-8')\n\n\nJsonFileProcessor\n^^^^^^^^^^^^^^^^^\n\n.. 
code:: python\n\n    from gokart.file_processor import JsonFileProcessor\n\n    # For Polars\n    processor = JsonFileProcessor(orient='records', dataframe_type='polars')\n\n    # For pandas (default)\n    processor = JsonFileProcessor(orient='records', dataframe_type='pandas')\n\n\nParquetFileProcessor\n^^^^^^^^^^^^^^^^^^^^\n\n.. code:: python\n\n    from gokart.file_processor import ParquetFileProcessor\n\n    # For Polars\n    processor = ParquetFileProcessor(\n        compression='gzip',\n        dataframe_type='polars'\n    )\n\n    # For pandas (default)\n    processor = ParquetFileProcessor(\n        compression='gzip',\n        dataframe_type='pandas'\n    )\n\n\nFeatherFileProcessor\n^^^^^^^^^^^^^^^^^^^^\n\n.. code:: python\n\n    from gokart.file_processor import FeatherFileProcessor\n\n    # For Polars\n    processor = FeatherFileProcessor(\n        store_index_in_feather=False,\n        dataframe_type='polars'\n    )\n\n    # For pandas (default)\n    processor = FeatherFileProcessor(\n        store_index_in_feather=True,\n        dataframe_type='pandas'\n    )\n\n.. note::\n    The ``store_index_in_feather`` parameter is pandas-specific and is ignored when using Polars.\n\n\nUsing Pandas and Polars Together\n---------------------------------\n\nSince projects often migrate from pandas gradually, gokart allows you to use both pandas and Polars simultaneously:\n\n.. 
code:: python\n\n    import pandas as pd\n    import polars as pl\n    from gokart import TaskOnKart\n    from gokart.file_processor import FeatherFileProcessor\n\n    class PandasTask(TaskOnKart[pd.DataFrame]):\n        \"\"\"Task that outputs pandas DataFrame\"\"\"\n        def output(self):\n            return self.make_target(\n                'path/to/pandas_output.feather',\n                processor=FeatherFileProcessor(\n                    store_index_in_feather=False,\n                    dataframe_type='pandas'\n                )\n            )\n\n        def run(self):\n            df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n            self.dump(df)\n\n    class PolarsTask(TaskOnKart[pl.DataFrame]):\n        \"\"\"Task that outputs Polars DataFrame\"\"\"\n        def requires(self):\n            return PandasTask()\n\n        def output(self):\n            return self.make_target(\n                'path/to/polars_output.feather',\n                processor=FeatherFileProcessor(\n                    store_index_in_feather=False,\n                    dataframe_type='polars'\n                )\n            )\n\n        def run(self):\n            # Load pandas DataFrame and convert to Polars\n            pandas_df = self.load()  # Returns pandas DataFrame\n            polars_df = pl.from_pandas(pandas_df)\n\n            # Process with Polars\n            result = polars_df.with_columns(\n                (pl.col('a') * 2).alias('a_doubled')\n            )\n\n            self.dump(result)\n\n\nDefault Behavior\n----------------\n\nWhen ``dataframe_type`` is not specified, file processors default to ``'pandas'`` for backward compatibility:\n\n.. 
code:: python\n\n    # These are equivalent\n    processor = CsvFileProcessor(sep=',')\n    processor = CsvFileProcessor(sep=',', dataframe_type='pandas')\n\n\nImportant Notes\n---------------\n\n**File Format Compatibility**\n\nFiles created with Polars processors can be read by pandas processors and vice versa. The underlying file formats (CSV, JSON, Parquet, Feather) are library-agnostic.\n\n**Pandas-specific Features**\n\nSome pandas-specific features are not available with Polars:\n\n- ``store_index_in_feather`` parameter in ``FeatherFileProcessor`` is ignored for Polars\n- ``engine`` parameter in ``ParquetFileProcessor`` is ignored for Polars (uses Polars' default)\n\n**Error Handling**\n\nIf you specify ``dataframe_type='polars'`` but Polars is not installed, you'll get an ``ImportError`` with installation instructions:\n\n.. code:: text\n\n    ImportError: polars is required for dataframe_type='polars'. Install with: pip install polars\n\n\nMigration Strategy\n------------------\n\nRecommended approach for migrating from pandas to Polars:\n\n1. Install Polars: ``pip install gokart[polars]``\n2. Create new tasks using ``dataframe_type='polars'``\n3. Keep existing tasks with ``dataframe_type='pandas'`` or default behavior\n4. Gradually migrate tasks as needed\n5. Convert DataFrames between libraries using ``pl.from_pandas()`` and ``df.to_pandas()`` when necessary\n"
  },
  {
    "path": "docs/requirements.txt",
    "content": "Sphinx\ngokart\nsphinx-rtd-theme\n"
  },
  {
    "path": "docs/setting_task_parameters.rst",
    "content": "============================\nSetting Task Parameters\n============================\n\nThere are several ways to set task parameters.\n\n- Set parameter from command line\n- Set parameter at config file\n- Set parameter at upstream task\n- Inherit parameter from other task\n\n\nSet parameter from command line\n==================================\n.. code:: sh\n\n    python main.py sample.SomeTask --SomeTask-param=Hello\n\nParameter of each task can be set as a command line parameter in ``--[task name]-[parameter name]=[value]`` format.\n\n\nSet parameter at config file\n==================================\n::\n\n    [sample.SomeTask]\n    param = Hello\n\nAbove config file (``config.ini``) must be read before ``gokart.run()`` as the following code:\n\n.. code:: python\n\n    if __name__ == '__main__':\n        gokart.add_config('./conf/config.ini')\n        gokart.run()\n\n\nIt can also be loaded from environment variable as the following code:\n\n::\n\n    [sample.SomeTask]\n    param=${PARAMS}\n\n    [TaskOnKart]\n    workspace_directory=${WORKSPACE_DIRECTORY}\n\nThe advantages of using environment variables are 1) important information will not be logged 2) common settings can be used.\n\n\nSet parameter at upstream task\n==================================\n\nParameters can be set at the upstream task, as in a typical pipeline.\n\n.. code:: python\n\n    class UpstreamTask(gokart.TaskOnKart):\n        def requires(self):\n            return dict(sometask=SomeTask(param='Hello'))\n\n\nInherit parameter from other task\n==================================\n\nParameter values can be inherited from other task using ``@inherits_config_params`` decorator.\n\n.. 
code:: python\n\n    class MasterConfig(luigi.Config):\n        param: luigi.Parameter = luigi.Parameter()\n        param2: luigi.Parameter = luigi.Parameter()\n\n    @inherits_config_params(MasterConfig)\n    class SomeTask(gokart.TaskOnKart):\n        param: luigi.Parameter = luigi.Parameter()\n\n\nThis is useful when multiple tasks has the same parameter. In the above example, parameter settings of ``MasterConfig`` will be inherited to all tasks decorated with ``@inherits_config_params(MasterConfig)`` as ``SomeTask``.\n\nNote that only parameters which exist in both ``MasterConfig`` and ``SomeTask`` will be inherited.\nIn the above example, ``param2`` will not be available in ``SomeTask``, since ``SomeTask`` does not have ``param2`` parameter.\n\n.. code:: python\n\n    class MasterConfig(luigi.Config):\n        param: luigi.Parameter = luigi.Parameter()\n        param2: luigi.Parameter = luigi.Parameter()\n\n    @inherits_config_params(MasterConfig, parameter_alias={'param2': 'param3'})\n    class SomeTask(gokart.TaskOnKart):\n        param3: luigi.Parameter = luigi.Parameter()\n\n\nYou may also set a parameter name alias by setting ``parameter_alias``.\n``parameter_alias`` must be a dictionary of key: inheriting task's parameter name, value: decorating task's parameter name.\n\nIn the above example, ``SomeTask.param3`` will be set to same value as ``MasterConfig.param2``.\n"
  },
  {
    "path": "docs/slack_notification.rst",
    "content": "Slack notification\n=========================\n\nPrerequisites\n-------------\n\nPrepare following environmental variables:\n\n.. code:: sh\n\n   export SLACK_TOKEN=xoxb-your-token    // should use token starts with \"xoxb-\" (bot token is preferable)\n   export SLACK_CHANNEL=channel-name     // not \"#channel-name\", just \"channel-name\"\n\n\nA Slack bot token can obtain from `slack app document <https://api.slack.com/apps>`_.\n\nA bot token needs following scopes:\n\n- `channels:read`\n- `chat:write`\n- `files:write`\n\nMore about scopes are `slack scopes document <https://api.slack.com/scopes>`_.\n\nImplement Slack notification\n----------------------------\n\nWrite following codes pass arguments to your gokart workflow.\n\n.. code:: python\n\n    cmdline_args = sys.argv[1:]\n    if 'SLACK_CHANNEL' in os.environ:\n        cmdline_args.append(f'--SlackConfig-channel={os.environ[\"SLACK_CHANNEL\"]}')\n    if 'SLACK_TO_USER' in os.environ:\n        cmdline_args.append(f'--SlackConfig-to-user={os.environ[\"SLACK_TO_USER\"]}')\n    gokart.run(cmdline_args)\n\n"
  },
  {
    "path": "docs/task_information.rst",
    "content": "Task Information\n================\n\nThere are 6 ways to print the significant parameters and state of the task and its dependencies.\n\n* 1. One is to use luigi module. See `luigi.tools.deps_tree module <https://luigi.readthedocs.io/en/stable/api/luigi.tools.deps_tree.html>`_ for details.\n* 2. ``task-info`` option of ``gokart.run()``.\n* 3. ``make_task_info_as_tree_str()`` will return significant parameters and dependency tree as str.\n* 4. ``make_task_info_as_table()`` will return significant parameter and dependent tasks as pandas.DataFrame table format.\n* 5. ``dump_task_info_table()`` will dump the result of ``make_task_info_as_table()`` to a file.\n* 6. ``dump_task_info_tree()`` will dump the task tree object (TaskInfo) to a pickle file.\n\n\nThis document will cover 2~6.\n\n2. task-info option of gokart.run()\n--------------------------------------------\n\nOn CLI\n~~~~~~\n\nAn example implementation could be like:\n\n.. code:: python\n\n    # main.py\n\n    import gokart\n\n    if __name__ == '__main__':\n        gokart.run()\n\n\n.. code:: sh\n\n    $ python main.py \\\n        TaskB \\\n        --param=Hello \\\n        --local-scheduler \\\n        --tree-info-mode=all \\\n        --tree-info-output-path=tree_all.txt\n\n\nThe ``--tree-info-mode`` option accepts \"simple\" and \"all\", and a task information is saved in ``--tree-info-output-path``.\n\nwhen \"simple\" is passed, it outputs the states and the unique ids of tasks.\nAn example output is as follows:\n\n.. code:: text\n\n    └─-(COMPLETE) TaskB[09fe5591ef2969ce7443c419a3b19e5d]\n       └─-(COMPLETE) TaskA[2549878535c070fb6c3cd4061bdbbcff]\n\n\n\nWhen \"all\" is passed, it outputs the states, the unique ids, the significant parameters, the execution times and the task logs of tasks.\nAn example output is as follows:\n\n.. 
code:: text\n\n    └─-(COMPLETE) TaskB[09fe5591ef2969ce7443c419a3b19e5d](parameter={'workspace_directory': './resources/', 'local_temporary_directory': './resources/tmp/', 'param': 'Hello'}, output=['./resources/output_of_task_b_09fe5591ef2969ce7443c419a3b19e5d.pkl'], time=0.002290010452270508s, task_log={})\n       └─-(COMPLETE) TaskA[2549878535c070fb6c3cd4061bdbbcff](parameter={'workspace_directory': './resources/', 'local_temporary_directory': './resources/tmp/', 'param': 'called by TaskB'}, output=['./resources/output_of_task_a_2549878535c070fb6c3cd4061bdbbcff.pkl'], time=0.0009829998016357422s, task_log={})\n\n\n\n3. make_task_info_as_tree_str()\n-----------------------------------------\n\n``gokart.tree.task_info.make_task_info_as_tree_str()`` will return a tree dependency tree as a str.\n\n.. code:: python\n\n    from gokart.tree.task_info import make_task_info_as_tree_str\n\n    make_task_info_as_tree_str(task, ignore_task_names)\n    # Parameters\n    # ----------\n    # - task: TaskOnKart\n    #     Root task.\n    # - details: bool\n    #     Whether or not to output details.\n    # - abbr: bool\n    #     Whether or not to simplify tasks information that has already appeared.\n    # - ignore_task_names: Optional[List[str]]\n    #     List of task names to ignore.\n    # Returns\n    # -------\n    # - tree_info : str\n    #     Formatted task dependency tree.\n\n\nexample\n\n.. 
code:: python\n\n    import luigi\n    import gokart\n\n    class TaskA(gokart.TaskOnKart[str]):\n        param = luigi.Parameter()\n        def run(self):\n            self.dump(f'{self.param}')\n\n    class TaskB(gokart.TaskOnKart[str]):\n        task: gokart.TaskOnKart[str] = gokart.TaskInstanceParameter()\n        def run(self):\n            task = self.load('task')\n            self.dump(task + ' taskB')\n\n    class TaskC(gokart.TaskOnKart[str]):\n        task: gokart.TaskOnKart[str] = gokart.TaskInstanceParameter()\n        def run(self):\n            task = self.load('task')\n            self.dump(task + ' taskC')\n\n    class TaskD(gokart.TaskOnKart):\n        task1: gokart.TaskOnKart[str] = gokart.TaskInstanceParameter()\n        task2: gokart.TaskOnKart[str] = gokart.TaskInstanceParameter()\n        def run(self):\n            task = [self.load('task1'), self.load('task2')]\n            self.dump(','.join(task))\n\n\n.. code:: python\n\n    task = TaskD(\n        task1=TaskD(\n            task1=TaskD(task1=TaskC(task=TaskA(param='foo')), task2=TaskC(task=TaskB(task=TaskA(param='bar')))),  # same task\n            task2=TaskD(task1=TaskC(task=TaskA(param='foo')), task2=TaskC(task=TaskB(task=TaskA(param='bar'))))   # same task\n        ),\n        task2=TaskD(\n            task1=TaskD(task1=TaskC(task=TaskA(param='foo')), task2=TaskC(task=TaskB(task=TaskA(param='bar')))),  # same task\n            task2=TaskD(task1=TaskC(task=TaskA(param='foo')), task2=TaskC(task=TaskB(task=TaskA(param='bar'))))   # same task\n        )\n    )\n    print(gokart.make_task_info_as_tree_str(task))\n\n\n.. 
code:: sh\n\n    └─-(PENDING) TaskD[187ff82158671283e127e2e1f7c9c095]\n        |--(PENDING) TaskD[ca9e943ce049e992b371898c0578784e]    # duplicated TaskD\n        |  |--(PENDING) TaskD[1cc9f9fc54a56614f3adef74398684f4]    # duplicated TaskD\n        |  |  |--(PENDING) TaskC[dce3d8e7acaf1bb9731fb4f2ae94e473]\n        |  |  |  └─-(PENDING) TaskA[be65508b556dd3752359b4246791413d]\n        |  |  └─-(PENDING) TaskC[de39593d31490aba3cdca3c650432504]\n        |  |     └─-(PENDING) TaskB[bc2f7d6cdd6521cc116c35f0f144eed3]\n        |  |        └─-(PENDING) TaskA[5a824f7d232eb69d46f0ac6bbd93b565]\n        |  └─-(PENDING) TaskD[1cc9f9fc54a56614f3adef74398684f4]\n        |     └─- ...\n        └─-(PENDING) TaskD[ca9e943ce049e992b371898c0578784e]\n            └─- ...\n\n\nIn the above example, the sub-trees already shown is omitted.\nThis can be disabled by passing ``False`` to ``abbr`` flag:\n\n.. code:: python\n\n    print(make_task_info_as_tree_str(task, abbr=False))\n\n\n4. make_task_info_as_table()\n--------------------------------\n\n``gokart.tree.task_info.make_task_info_as_table()`` will return a table containing the information of significant parameters and dependent tasks as a pandas DataFrame.\nThis table contains `task name`, `cache unique id`, `cache file path`, `task parameters`, `task processing time`, `completed flag`, and `task log`.\n\n.. code:: python\n\n    from gokart.tree.task_info import make_task_info_as_table\n\n    make_task_info_as_table(task, ignore_task_names)\n    # \"\"\"Return a table containing information about dependent tasks.\n    #\n    # Parameters\n    # ----------\n    # - task: TaskOnKart\n    #     Root task.\n    # - ignore_task_names: Optional[List[str]]\n    #     List of task names to ignore.\n    # Returns\n    # -------\n    # - task_info_table : pandas.DataFrame\n    #     Formatted task dependency table.\n    # \"\"\"\n\n\n5. 
dump_task_info_table()\n-----------------------------------------\n\n``gokart.tree.task_info.dump_task_info_table()`` will dump the task_info table made at ``make_task_info_as_table()`` to a file.\n\n.. code:: python\n\n    from gokart.tree.task_info import dump_task_info_table\n\n    dump_task_info_table(task, task_info_dump_path, ignore_task_names)\n    # Parameters\n    # ----------\n    # - task: TaskOnKart\n    #     Root task.\n    # - task_info_dump_path: str\n    #     Output target file path. Path destination can be `local`, `S3`, or `GCS`.\n    #     File extension can be any type that gokart file processor accepts, including `csv`, `pickle`, or `txt`.\n    #     See `TaskOnKart.make_target module <https://gokart.readthedocs.io/en/latest/task_on_kart.html#taskonkart-make-target>` for details.\n    # - ignore_task_names: Optional[List[str]]\n    #     List of task names to ignore.\n    # Returns\n    # -------\n    # None\n\n\n6. dump_task_info_tree()\n-----------------------------------------\n\n``gokart.tree.task_info.dump_task_info_tree()`` will dump the task tree object (TaskInfo) to a pickle file.\n\n.. code:: python\n\n    from gokart.tree.task_info import dump_task_info_tree\n\n    dump_task_info_tree(task, task_info_dump_path, ignore_task_names, use_unique_id)\n    # Parameters\n    # ----------\n    # - task: TaskOnKart\n    #     Root task.\n    # - task_info_dump_path: str\n    #     Output target file path. Path destination can be `local`, `S3`, or `GCS`.\n    #     File extension must be '.pkl'.\n    # - ignore_task_names: Optional[List[str]]\n    #     List of task names to ignore.\n    # - use_unique_id: bool = True\n    #     Whether to use unique id to dump target file. 
Default is True.\n    # Returns\n    # -------\n    # None\n\n\nTask Logs\n---------\nTo output extra information of tasks by ``tree-info``, the member variable :attr:`~gokart.task.TaskOnKart.task_log` of ``TaskOnKart`` keeps any information as a dictionary.\n\nFor instance, the following code runs,\n\n.. code:: python\n\n    import gokart\n\n\n    class SampleTaskLog(gokart.TaskOnKart):\n        def run(self):\n            # Add some logs.\n            self.task_log['sample key'] = 'sample value'\n\n\n    if __name__ == '__main__':\n        SampleTaskLog().run()\n        gokart.run([\n            '--tree-info-mode=all',\n            '--tree-info-output-path=sample_task_log.txt',\n            'SampleTaskLog',\n            '--local-scheduler'])\n\n\nthe output could be like:\n\n.. code:: text\n\n    └─-(COMPLETE) SampleTaskLog[...](..., task_log={'sample key': 'sample value'})\n\n\nDelete Unnecessary Output Files\n--------------------------------\nTo delete output files which are not necessary to run a task, add option ``--delete-unnecessary-output-files``. This option is supported only when a task outputs files in local storage not S3 for now.\n"
  },
  {
    "path": "docs/task_on_kart.rst",
    "content": "TaskOnKart\n==========\n``TaskOnKart`` inherits ``luigi.Task``, and has functions to make it easy to define tasks.\nPlease see `luigi documentation <https://luigi.readthedocs.io/en/stable/index.html>`_ for details of ``luigi.Task``.\n\nPlease refer to :doc:`intro_to_gokart` section and :doc:`tutorial` section.\n\n\nOutline\n--------\nHow ``TaskOnKart`` helps to define a task looks like:\n\n.. code:: python\n\n    import luigi\n    import gokart\n\n\n    class TaskA(gokart.TaskOnKart[str]):\n        param: luigi.Parameter = luigi.Parameter()\n\n        def output(self):\n            return self.make_target('output_of_task_a.pkl')\n\n        def run(self):\n            results = f'param={self.param}'\n            self.dump(results)\n\n\n    class TaskB(gokart.TaskOnKart[str]):\n        param: luigi.Parameter = luigi.Parameter()\n\n        def requires(self):\n            return TaskA(param='world')\n\n        def output(self):\n            # `make_target` makes an instance of `luigi.Target`.\n            # This infers the output format and the destination of an output objects.\n            # The target file path is\n            #     '{self.workspace_directory}/output_of_task_b_{self.make_unique_id()}.pkl'.\n            return self.make_target('output_of_task_b.pkl')\n\n        def run(self):\n            # `load` loads input data. In this case, this loads the output of `TaskA`.\n            output_of_task_a = self.load()\n            results = f'Task A: {output_of_task_a}\\nTaskB: param={self.param}'\n            # `dump` writes `results` to the file path of `self.output()`.\n            self.dump(results)\n\n\n    if __name__ == '__main__':\n        print(gokart.build([TaskB(param='Hello')]))\n\n\nThe result of this script will look like this\n\n.. 
code:: sh\n\n    Task A: param=world\n    Task B: param=Hello\n\nThe results are obtained as a pipeline by linking A and B.\n\n\nTaskOnKart.make_target\n----------------------\nThe :func:`~gokart.task.TaskOnKart.make_target` method is used to make an instance of ``Luigi.Target``.\nFor instance, an example implementation could be as follows:\n\n.. code:: python\n\n    def output(self):\n        return self.make_target('file_name.pkl')\n\nThe ``make_target`` method adds ``_{self.make_unique_id()}`` to the file name as suffix.\nIn this case, the target file path is ``{self.workspace_directory}/file_name_{self.make_unique_id()}.pkl``.\n\n\nIt is also possible to specify a file format other than pkl. The supported file formats are as follows:\n\n- .pkl\n- .txt\n- .csv\n- .tsv\n- .gz\n- .json\n- .xml\n- .npz\n- .parquet\n- .feather\n- .png\n- .jpg\n- .ini\n\n\nIf dump something other than the above, can use :func:`~gokart.TaskOnKart.make_model_target`.\nPlease refer to :func:`~gokart.task.TaskOnKart.make_target` and described later Advanced Features section.\n\n\n.. note::\n    By default, file path is inferred from \"__name__\" of the script, so ``output`` method can be omitted.\n    Please refer to :doc:`tutorial` section.\n\n.. note::\n    When using `.feather`, index will be converted to column at saving and restored to index at loading.\n    If you don't prefere saving index, set `store_index_in_feather=False` parameter at `gokart.target.make_target()`.\n\n.. note::\n    When you set `serialized_task_definition_check=True`, the task will rerun when you modify the scripts of the task.\n    Please note that the scripts outside the class are not considered.\n\n\n\nTaskOnKart.load\n----------------\nThe :func:`~gokart.task.TaskOnKart.load` method is used to load input data.\nFor instance, an example implementation could be as follows:\n\n.. 
code:: python\n\n    def requires(self):\n        return TaskA(param='called by TaskB')\n\n    def run(self):\n        # `load` loads input data. In this case, this loads the output of `TaskA`.\n        output_of_task_a = self.load()\n\n\nIn the case that a task requires 2 or more tasks as input, the return value of this method has the same structure with `requires` value.\nFor instance, an example implementation that `requires` returns a dictionary of tasks could be like follows:\n\n.. code:: python\n\n    def requires(self):\n        return dict(a=TaskA(), b=TaskB())\n\n    def run(self):\n        data = self.load() # returns dict(a=self.load('a'), b=self.load('b'))\n\n\nThe `load` method loads individual task input by passing a key of an input dictionary as follows:\n\n.. code:: python\n\n    def run(self):\n        data_a = self.load('a')\n        data_b = self.load('b')\n\n\nAs an alternative, the `load` method loads individual task input by passing an instance of TaskOnKart as follows:\n\n.. code:: python\n\n    def run(self):\n        data_a = self.load(TaskA())\n        data_b = self.load(TaskB())\n\n\nWe can also omit the :func:`~gokart.task.TaskOnKart.requires` and write the task used by :func:`~gokart.parameter.TaskInstanceParameter`.\nAlso please refer to :func:`~gokart.task.TaskOnKart.load`, :doc:`task_parameters`, and described later Advanced Features section.\n\n\nTaskOnKart.dump\n----------------\nThe :func:`~gokart.task.TaskOnKart.dump` method is used to dump results of tasks.\nFor instance, an example implementation could be as follows:\n\n.. code:: python\n\n    def output(self):\n        return self.make_target('output.pkl')\n\n    def run(self):\n        results = do_something(self.load())\n        self.dump(results)\n\n\nIn the case that a task has 2 or more output, it is possible to specify output target by passing a key of dictionary like follows:\n\n.. 
code:: python\n\n    def output(self):\n        return dict(a=self.make_target('output_a.pkl'), b=self.make_target('output_b.pkl'))\n\n    def run(self):\n        a_data = do_something_a(self.load())\n        b_data = do_something_b(self.load())\n        self.dump(a_data, 'a')\n        self.dump(b_data, 'b')\n\nPlease refer to :func:`~gokart.task.TaskOnKart.dump`.\n\n\nAdvanced Features\n---------------------\n\nTaskOnKart.load_generator\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nThe :func:`~gokart.task.TaskOnKart.load_generator` method is used to load input data with generator.\nFor instance, an example implementation could be as follows:\n\n.. code:: python\n\n    def requires(self):\n        return TaskA(param='called by TaskB')\n\n    def run(self):\n        for data in self.load_generator():\n            any_process(data)\n\n\nUsage is the same as `TaskOnKart.generator`.\n`load_generator` reads the divided file into iterations.\nIt's effective when can't read all data to memory, because `load_generator` doesn't load all files at once.\n\nPlease refer to :func:`~gokart.task.TaskOnKart.load_generator`.\n\n\nTaskOnKart.make_model_target\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nThe :func:`~gokart.task.TaskOnKart.make_model_target` method is used to dump for non supported file types.\n\n.. 
code:: python\n\n    import gensim\n\n    class TrainWord2Vec(gokart.TaskOnKart[Word2VecResult]):\n        def output(self):\n            # please use 'zip'.\n            return self.make_model_target(\n                'model.zip',\n                save_function=gensim.model.Word2Vec.save,\n                load_function=gensim.model.Word2Vec.load)\n\n        def run(self):\n            # -- train word2vec ---\n            word2vec = train_word2vec()\n            self.dump(word2vec)\n\nIt is dumped and zipped with ``gensim.model.Word2Vec.save``.\n\nPlease refer to :func:`~gokart.task.TaskOnKart.make_model_target`.\n\n\nTaskOnKart.fail_on_empty_dump\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nPlease refer to :doc:`for_pandas`.\n\n\nTaskOnKart.should_dump_supplementary_log_files\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nWhether to dump supplementary files (task_log, random_seed, task_params, processing_time, module_versions) or not. Default is True.\n\nNote that when set to False, task_info functions (e.g. gokart.tree.task_info.make_task_info_as_tree_str()) cannot be used.\n\n\nDump csv with encoding\n~~~~~~~~~~~~~~~~~~~~~~~\n\nYou can dump csv file by implementing `Task.output()` method as follows:\n\n.. code:: python\n\n    def output(self):\n        return self.make_target('file_name.csv')\n\nBy default, csv file is dumped with `utf-8` encoding.\n\nIf you want to dump csv file with other encodings, you can use `encoding` parameter as follows:\n\n.. 
code:: python\n\n    from gokart.file_processor import CsvFileProcessor\n\n    def output(self):\n        return self.make_target('file_name.csv', processor=CsvFileProcessor(encoding='cp932'))\n        # This will dump csv as 'cp932' which is used in Windows.\n\nCache output in memory instead of dumping to files\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nYou can use :class:`~InMemoryTarget` to cache output in memory instead of dumping to files by calling :func:`~gokart.target.make_in_memory_target`.\n\nPlease note that :class:`~InMemoryTarget` is an experimental feature.\n\n.. code:: python\n\n    from gokart.in_memory.target import make_in_memory_target\n\n    def output(self):\n        unique_id = self.make_unique_id() if use_unique_id else None\n        # TaskLock is not supported in InMemoryTarget, so it's dummy\n        task_lock_params = make_task_lock_params(\n            file_path='dummy_path',\n            unique_id=unique_id,\n            redis_host=None,\n            redis_port=None,\n            redis_timeout=self.redis_timeout,\n            raise_task_lock_exception_on_collision=False,\n        )\n        return make_in_memory_target('dummy_path', task_lock_params, unique_id)\n"
  },
  {
    "path": "docs/task_parameters.rst",
    "content": "=================\nTask Parameters\n=================\n\nLuigi Parameter\n================\n\nWe can set parameters for tasks.\nAlso please refer to :doc:`task_settings` section.\n\n.. code:: python\n\n    class Task(gokart.TaskOnKart):\n        param_a: luigi.Parameter = luigi.Parameter()\n        param_c: luigi.ListParameter = luigi.ListParameter()\n        param_d: luigi.IntParameter = luigi.IntParameter(default=1)\n\nPlease refer to `luigi document <https://luigi.readthedocs.io/en/stable/api/luigi.parameter.html>`_ for a list of parameter types.\n\n\nGokart Parameter\n================\n\nThere are also parameters provided by gokart.\n\n- gokart.TaskInstanceParameter\n- gokart.ListTaskInstanceParameter\n- gokart.ExplicitBoolParameter\n\n\ngokart.TaskInstanceParameter\n--------------------------------\n\nThe :func:`~gokart.parameter.TaskInstanceParameter` executes a task using the results of a task as dynamic parameters.\n\n\n.. code:: python\n\n    class TaskA(gokart.TaskOnKart[str]):\n        def run(self):\n            self.dump('Hello')\n\n\n    class TaskB(gokart.TaskOnKart[str]):\n        require_task: gokart.TaskInstanceParameter = gokart.TaskInstanceParameter()\n\n        def requires(self):\n            return self.require_task\n\n        def run(self):\n            task_a = self.load()\n            self.dump(','.join([task_a, 'world']))\n\n    task = TaskB(require_task=TaskA())\n    print(gokart.build(task))  # Hello,world\n\n\nHelps to create a pipeline.\n\n\ngokart.ListTaskInstanceParameter\n-------------------------------------\n\nThe :func:`~gokart.parameter.ListTaskInstanceParameter` is list of TaskInstanceParameter.\n\n\ngokart.ExplicitBoolParameter\n-----------------------------------\n\nThe :func:`~gokart.parameter.ExplicitBoolParameter` is parameter for explicitly specified value.\n\n``luigi.BoolParameter`` already has \"explicit parsing\" feature, but also still has implicit behavior like follows.\n\n::\n\n    $ python main.py 
Task --param\n    # param will be set as True\n    $ python main.py Task\n    # param will be set as False\n\n``ExplicitBoolParameter`` solves these problems on parameters from command line.\n\n\ngokart.SerializableParameter\n----------------------------\n\nThe :func:`~gokart.parameter.SerializableParameter` is a parameter for any object that can be serialized and deserialized.\nThis parameter is particularly useful when you want to pass a complex object or a set of parameters to a task.\n\nThe object must implement the following methods:\n\n- ``gokart_serialize``: Serialize the object to a string. This serialized string must uniquely identify the object to enable task caching.\n  Note that it is not required for deserialization.\n- ``gokart_deserialize``: Deserialize the object from a string, typically used for CLI arguments.\n\nExample\n^^^^^^^\n\n.. code-block:: python\n\n    import json\n    from dataclasses import dataclass\n\n    import gokart\n\n    @dataclass(frozen=True)\n    class Config:\n        foo: int\n        # The `bar` field does not affect the result of the task.\n        # Similar to `luigi.Parameter(significant=False)`.\n        bar: str\n\n        def gokart_serialize(self) -> str:\n            # Serialize only the `foo` field since `bar` is irrelevant for caching.\n            return json.dumps({'foo': self.foo})\n\n        @classmethod\n        def gokart_deserialize(cls, s: str) -> 'Config':\n            # Deserialize the object from the provided string.\n            return cls(**json.loads(s))\n\n    class DummyTask(gokart.TaskOnKart):\n        config: gokart.SerializableParameter[Config] = gokart.SerializableParameter(object_type=Config)\n\n        def run(self):\n            # Save the `config` object as part of the task result.\n            self.dump(self.config)\n"
  },
  {
    "path": "docs/task_settings.rst",
    "content": "Task Settings\n=============\n\nTask settings. Also please refer to :doc:`task_parameters` section.\n\n\nDirectory to Save Outputs\n-------------------------\n\nWe can use both a local directory and the S3 to save outputs.\nIf you would like to use local directory, please set a local directory path to :attr:`~gokart.task.TaskOnKart.workspace_directory`. Please refer to :doc:`task_parameters` for how to set it up.\n\nIt is recommended to use the config file since it does not change much.\n\n::\n\n    # base.ini\n    [TaskOnKart]\n    workspace_directory=${TASK_WORKSPACE_DIRECTORY}\n\n.. code:: python\n\n    # main.py\n    import gokart\n    gokart.add_config('base.ini')\n\n\nTo use the S3 or GCS repository, please set the bucket path as ``s3://{YOUR_REPOSITORY_NAME}`` or ``gs://{YOUR_REPOSITORY_NAME}`` respectively.\n\nIf use S3 or GCS, please set credential information to Environment Variables.\n\n.. code:: sh\n\n    # S3\n    export AWS_ACCESS_KEY_ID='~~~'  # AWS access key\n    export AWS_SECRET_ACCESS_KEY='~~~'  # AWS secret access key\n\n    # GCS\n    export GCS_CREDENTIAL='~~~'  # GCS credential\n    export DISCOVER_CACHE_LOCAL_PATH='~~~'  # The local file path of discover api cache.\n\n\nRerun task\n----------\n\nThere are times when we want to rerun a task, such as when change script or on batch. Please use the ``rerun`` parameter or add an arbitrary parameter.\n\n\nWhen set rerun as follows:\n\n.. code:: python\n\n    # rerun TaskA\n    gokart.build(Task(rerun=True))\n\n\nWhen used from an argument as follows:\n\n.. code:: python\n\n    # main.py\n    class Task(gokart.TaskOnKart[str]):\n        def run(self):\n            self.dump('hello')\n\n.. code:: sh\n\n    python main.py Task --local-scheduler --rerun\n\n\n``rerun`` parameter will look at the dependent tasks up to one level.\n\nExample: Suppose we have a straight line pipeline composed of TaskA, TaskB and TaskC,  and TaskC is an endpoint of this pipeline. 
We also suppose that all the tasks have already been executed.\n\n- TaskA(rerun=True)  ->  TaskB  ->  TaskC    # not rerunning\n- TaskA  ->  TaskB(rerun=True)  ->  TaskC    # rerunning TaskB and TaskC\n\nThis is due to the way intermediate files are handled. The ``rerun`` parameter is ``significant=False``, so it does not affect the hash value. It is very important to understand this difference.\n\n\nIf you want to change the parameter of TaskA and rerun TaskB and TaskC, we recommend adding an arbitrary parameter.\n\n.. code:: python\n\n    class TaskA(gokart.TaskOnKart):\n        __version: luigi.IntParameter = luigi.IntParameter(default=1)\n\nIf the hash value of TaskA changes, the dependent tasks (in this case, TaskB and TaskC) will rerun.\n\n\nFix random seed\n---------------\n\nEvery task has a parameter named :attr:`~gokart.task.TaskOnKart.fix_random_seed_methods` and :attr:`~gokart.task.TaskOnKart.fix_random_seed_value`. This can be used to fix the random seed.\n\n\n.. code:: python\n\n    import gokart\n    import random\n    import numpy as np\n    import torch\n\n    class Task(gokart.TaskOnKart[dict[str, Any]]):\n        def run(self):\n            x = [random.randint(0, 100) for _ in range(0, 10)]\n            y = [np.random.randint(0, 100) for _ in range(0, 10)]\n            z = [torch.randn(1).tolist()[0] for _ in range(0, 5)]\n            self.dump({'random': x, 'numpy': y, 'torch': z})\n\n    gokart.build(\n        Task(\n            fix_random_seed_methods=[\n                \"random.seed\",\n                \"numpy.random.seed\",\n                \"torch.random.manual_seed\"],\n            fix_random_seed_value=57))\n\n::\n\n    # //--- The output is as follows every time. 
---\n    # {'random': [65, 41, 61, 37, 55, 81, 48, 2, 94, 21],\n    #   'numpy': [79, 86, 5, 22, 79, 98, 56, 40, 81, 37],\n    #   'torch': [0.14460121095180511, -0.11649507284164429,\n    #            0.6928958296775818, -0.916053831577301, 0.7317505478858948]}\n\nThis will be useful when using machine learning libraries.\n"
  },
  {
    "path": "docs/tutorial.rst",
    "content": "Tutorial\n========\n\nAlso please refer to :doc:`intro_to_gokart` section.\n\n\n1, Make gokart project\n----------------------\n\nCreate a project using `cookiecutter-gokart <https://github.com/m3dev/cookiecutter-gokart>`_.\n\n\n.. code:: sh\n\n    cookiecutter  https://github.com/m3dev/cookiecutter-gokart\n    # project_name [project_name]: example\n    # package_name [package_name]: gokart_example\n    # python_version [3.7.0]:\n    # author [your name]: m3dev\n    # package_description [What's this project?]: gokart example\n    # license [MIT License]:\n\n\nYou will have a directory tree like following:\n\n.. code:: sh\n\n    tree example/\n    example/\n    ├── Dockerfile\n    ├── README.md\n    ├── conf\n    │   ├── logging.ini\n    │   └── param.ini\n    ├── gokart_example\n    │   ├── __init__.py\n    │   ├── model\n    │   │   ├── __init__.py\n    │   │   └── sample.py\n    │   └── utils\n    │       └── template.py\n    ├── main.py\n    ├── pyproject.toml\n    └── test\n        ├── __init__.py\n        └── unit_test\n            └── test_sample.py\n\n\n2, Running sample task\n----------------------\n\nLet's run the first task.\n\n.. code:: sh\n\n    python main.py gokart_example.Sample --local-scheduler\n\n\nThe results are stored in resources directory.\n\n.. code:: sh\n\n    tree resources\n    resources/\n    ├── gokart_example\n    │   └── model\n    │       └── sample\n    │           └── Sample_cdf55a3d6c255d8c191f5f472da61f99.pkl\n    └── log\n        ├── module_versions\n        │   └── Sample_cdf55a3d6c255d8c191f5f472da61f99.txt\n        ├── processing_time\n        │   └── Sample_cdf55a3d6c255d8c191f5f472da61f99.pkl\n        ├── random_seed\n        │   └── Sample_cdf55a3d6c255d8c191f5f472da61f99.pkl\n        ├── task_log\n        │   └── Sample_cdf55a3d6c255d8c191f5f472da61f99.pkl\n        └── task_params\n            └── Sample_cdf55a3d6c255d8c191f5f472da61f99.pkl\n\n\nPlease refer to :doc:`intro_to_gokart` for output\n\n.. 
note::\n\n    It is better to use poetry in terms of the module version. Please refer to `poetry document <https://python-poetry.org/docs/>`_\n\n    .. code:: sh\n\n        poetry lock\n        poetry run python main.py gokart_example.Sample --local-scheduler\n\n    If want to stabilize it further, please use docker.\n\n    .. code:: sh\n\n        docker build -t sample .\n        docker run -it sample \"python main.py gokart_example.Sample --local-scheduler\"\n\n\n\n3, Check result\n---------------\n\nCheck the output.\n\n.. code:: python\n\n    with open('resources/gokart_example/model/sample/Sample_cdf55a3d6c255d8c191f5f472da61f99.pkl', 'rb') as f:\n        print(pickle.load(f))  # sample output\n\n\n4, Run unittest\n------------------\n\nIt is important to run unittest before and after modifying the code.\n\n.. code:: sh\n\n    python -m unittest discover -s ./test/unit_test/\n    .\n    ----------------------------------------------------------------------\n    Ran 1 test in 0.001s\n\n    OK\n\n5, Create Task\n--------------\n\nWriting gokart-like tasks.\nModify ``example/gokart_example/model/sample.py`` as follows:\n\n\n.. code:: python\n\n    from logging import getLogger\n    import gokart\n    from gokart_example.utils.template import GokartTask\n    logger = getLogger(__name__)\n\n\n    class Sample(GokartTask):\n        def run(self):\n            self.dump('sample output')\n\n\n    class StringToSplit(GokartTask):\n        \"\"\"Like the function to divide received data by spaces.\"\"\"\n        task: gokart.TaskInstanceParameter = gokart.TaskInstanceParameter()\n\n        def run(self):\n            sample = self.load('task')\n            self.dump(sample.split(' '))\n\n\n    class Main(GokartTask):\n        \"\"\"Endpoint task.\"\"\"\n        def requires(self):\n            return StringToSplit(task=Sample())\n\n\nAdded ``Main`` and ``StringToSplit``. 
``StringToSplit`` is a function-like task that loads the result of an arbitrary task and splits it by spaces. ``Main`` is injecting ``Sample`` into ``StringToSplit``. It like Endpoint.\n\nLet’s run the ``Main`` task.\n\n\n.. code:: sh\n\n    python main.py gokart_example.Main --local-scheduler\n\n\nPlease take a look at the logger output at this time.\n\n::\n\n    ===== Luigi Execution Summary =====\n\n    Scheduled 3 tasks of which:\n    * 1 complete ones were encountered:\n        - 1 gokart_example.Sample(...)\n    * 2 ran successfully:\n        - 1 gokart_example.Main(...)\n        - 1 gokart_example.StringToSplit(...)\n\n    This progress looks :) because there were no failed tasks or missing dependencies\n\n    ===== Luigi Execution Summary =====\n\nAs the log shows, ``Sample`` has been executed once, so the ``cache`` will be used.\nThe only things that worked were ``Main`` and ``StringToSplit``.\n\n\nThe output will look like the following, with the result in ``StringToSplit_b8a0ce6c972acbd77eae30f35da4307e.pkl``.\n\n::\n\n    tree resources/\n    resources/\n    ├── gokart_example\n    │   └── model\n    │       └── sample\n    │           ├── Sample_cdf55a3d6c255d8c191f5f472da61f99.pkl\n    │           └── StringToSplit_b8a0ce6c972acbd77eae30f35da4307e.pkl\n    ...\n\n\n.. code:: python\n\n    with open('resources/gokart_example/model/sample/StringToSplit_b8a0ce6c972acbd77eae30f35da4307e.pkl', 'rb') as f:\n        print(pickle.load(f))  # ['sample', 'output']\n\n\nIt was able to move the added task.\n\n\n6, Rerun Task\n-------------\n\nFinally, let's rerun the task.\nThere are two ways to rerun a task.\nChange the ``rerun parameter`` or ``parameters of the dependent tasks``.\n\n\n``gokart.TaskOnKart`` can set ``rerun parameter`` for each task like following:\n\n.. 
code:: python\n\n    class Main(GokartTask):\n        rerun=True\n\n        def requires(self):\n            return StringToSplit(task=Sample(rerun=True), rerun=True)\n\nOR\n\n\nAdd a new parameter to dependent tasks like the following:\n\n.. code:: python\n\n    class Sample(GokartTask):\n        version: luigi.IntParameter = luigi.IntParameter(default=1)\n\n        def run(self):\n            self.dump(f'sample output version {self.version}')\n\n\nIn both cases, all tasks will be rerun.\nThe difference is the hash value given to output files.\nThe ``rerun`` parameter has no effect on the hash value.\nSo it will be rerun with the same hash value.\n\nIn the second method, ``version parameter`` is added to the ``Sample`` task.\nThis parameter will change the hash value of ``Sample`` and generate another output file.\nAnd the dependent task, ``StringToSplit``, will also have a different hash value, and rerun.\n\nPlease refer to :doc:`task_settings` for details.\n\nPlease try rerunning the task at hand :)\n\n\nFeature\n-------\n\nThis is the end of the gokart tutorial.\nThe tutorial is an introduction to some of the features.\nThere are still more useful features.\n\nPlease see :doc:`task_on_kart` section, :doc:`for_pandas` section and :doc:`task_parameters` section for more useful features of the task.\n\nHave a good gokart life.\n"
  },
  {
    "path": "docs/using_task_task_conflict_prevention_lock.rst",
    "content": "Task conflict prevention lock\n=============================\n\nIf there is a possibility of multiple worker nodes executing the same task, task cache conflict may happen.\nSpecifically, while node A is loading the cache of a task, node B may be writing to it.\nThis can lead to reading inappropriate data and other unwanted behaviors.\n\nThe redis lock introduced in this page is a feature to prevent such cache collisions.\n\nRequires\n--------\n\nYou need to install `redis <https://redis.io/topics/quickstart>`_ for using this advanced feature.\n\n\nHow to use\n-----------\n\n\n1. Set up a redis server somewhere accessible from gokart/luigi jobs.\n\n    e.g. The following script will run redis on your localhost.\n    \n    .. code:: bash\n\n        $ redis-server\n\n2. Set redis server hostname and port number as parameters of gokart.TaskOnKart().\n\n    You can set it by adding ``--redis-host=[your-redis-localhost] --redis-port=[redis-port-number]`` options to gokart python script.\n\n    e.g. \n\n    .. code:: bash\n\n        python main.py sample.SomeTask --local-scheduler --redis-host=localhost --redis-port=6379\n    \n\n    Alternatively, you may set parameters in the config file.\n    \n    e.g.\n\n    .. code::\n\n        [TaskOnKart]\n        redis_host=localhost\n        redis_port=6379\n\n3. Done\n    \n    With the above configuration, all tasks that inherit gokart.TaskOnKart will ask the redis server whether any other node is trying to access the same cache file at the same time whenever they access the file with dump or load.\n"
  },
  {
    "path": "examples/gokart_notebook_example.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 1,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"gokart                        1.0.2\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"!pip list | grep gokart\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 3,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import gokart\\n\",\n    \"import luigi\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Examples of using gokart at jupyter notebook\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Basic Usage\\n\",\n    \"This is a very basic usage, just to dump a run result of ExampleTaskA.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 4,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"example_2\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"class ExampleTaskA(gokart.TaskOnKart):\\n\",\n    \"    param = luigi.Parameter()\\n\",\n    \"    int_param = luigi.IntParameter(default=2)\\n\",\n    \"\\n\",\n    \"    def run(self):\\n\",\n    \"        self.dump(f'DONE {self.param}_{self.int_param}')\\n\",\n    \"\\n\",\n    \"    \\n\",\n    \"task_a = ExampleTaskA(param='example')\\n\",\n    \"output = gokart.build(task=task_a)\\n\",\n    \"print(output)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Make tasks dependencies with `requires()`\\n\",\n    \"ExampleTaskB is dependent on　ExampleTaskC and ExampleTaskD. 
They are defined in `ExampleTaskB.requires()`.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 5,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"DONE example_TASKC_TASKD\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"class ExampleTaskC(gokart.TaskOnKart):\\n\",\n    \"    def run(self):\\n\",\n    \"        self.dump('TASKC')\\n\",\n    \"    \\n\",\n    \"class ExampleTaskD(gokart.TaskOnKart):\\n\",\n    \"    def run(self):\\n\",\n    \"        self.dump('TASKD')\\n\",\n    \"\\n\",\n    \"class ExampleTaskB(gokart.TaskOnKart):\\n\",\n    \"    param = luigi.Parameter()\\n\",\n    \"\\n\",\n    \"    def requires(self):\\n\",\n    \"        return dict(task_c=ExampleTaskC(), task_d=ExampleTaskD())\\n\",\n    \"\\n\",\n    \"    def run(self):\\n\",\n    \"        task_c = self.load('task_c')\\n\",\n    \"        task_d = self.load('task_d')\\n\",\n    \"        self.dump(f'DONE {self.param}_{task_c}_{task_d}')\\n\",\n    \"    \\n\",\n    \"task_b = ExampleTaskB(param='example')\\n\",\n    \"output = gokart.build(task=task_b)\\n\",\n    \"print(output)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Make tasks dependencies with TaskInstanceParameter\\n\",\n    \"The dependencies are same as previous example, however they are defined at the outside of the task instead of defied at `ExampleTaskB.requires()`.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 6,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"DONE example_TASKC_TASKD\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"class ExampleTaskC(gokart.TaskOnKart):\\n\",\n    \"    def run(self):\\n\",\n    \"        self.dump('TASKC')\\n\",\n    \"    \\n\",\n    \"class ExampleTaskD(gokart.TaskOnKart):\\n\",\n    \"    def 
run(self):\\n\",\n    \"        self.dump('TASKD')\\n\",\n    \"\\n\",\n    \"class ExampleTaskB(gokart.TaskOnKart):\\n\",\n    \"    param = luigi.Parameter()\\n\",\n    \"    task_1 = gokart.TaskInstanceParameter()\\n\",\n    \"    task_2 = gokart.TaskInstanceParameter()\\n\",\n    \"\\n\",\n    \"    def requires(self):\\n\",\n    \"        return dict(task_1=self.task_1, task_2=self.task_2)  # required tasks are decided from the task parameters `task_1` and `task_2`\\n\",\n    \"\\n\",\n    \"    def run(self):\\n\",\n    \"        task_1 = self.load('task_1')\\n\",\n    \"        task_2 = self.load('task_2')\\n\",\n    \"        self.dump(f'DONE {self.param}_{task_1}_{task_2}')\\n\",\n    \"    \\n\",\n    \"task_b = ExampleTaskB(param='example', task_1=ExampleTaskC(), task_2=ExampleTaskD())  # Dependent tasks are defined here\\n\",\n    \"output = gokart.build(task=task_b)\\n\",\n    \"print(output)\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3.8.8 64-bit ('3.8.8': pyenv)\",\n   \"name\": \"python388jvsc74a57bd026997db2bf0f03e18da4e606f276befe0d6bf7cab2a6bb74742969d5bbde02ca\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.8.8\"\n  },\n  \"metadata\": {\n   \"interpreter\": {\n    \"hash\": \"26997db2bf0f03e18da4e606f276befe0d6bf7cab2a6bb74742969d5bbde02ca\"\n   }\n  },\n  \"orig_nbformat\": 3\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}"
  },
  {
    "path": "examples/logging.ini",
    "content": "[loggers]\nkeys=root,luigi,luigi-interface,gokart\n\n[handlers]\nkeys=stderrHandler\n\n[formatters]\nkeys=simpleFormatter\n\n[logger_root]\nlevel=INFO\nhandlers=stderrHandler\n\n[logger_gokart]\nlevel=INFO\nhandlers=stderrHandler\nqualname=gokart\npropagate=0\n\n[logger_luigi]\nlevel=INFO\nhandlers=stderrHandler\nqualname=luigi\npropagate=0\n\n[logger_luigi-interface]\nlevel=INFO\nhandlers=stderrHandler\nqualname=luigi-interface\npropagate=0\n\n[handler_stderrHandler]\nclass=StreamHandler\nformatter=simpleFormatter\nargs=(sys.stdout,)\n\n[formatter_simpleFormatter]\nformat=level=%(levelname)s time=%(asctime)s name=%(name)s file=%(filename)s line=%(lineno)d message=%(message)s\ndatefmt=%Y/%m/%d %H:%M:%S\nclass=logging.Formatter\n"
  },
  {
    "path": "examples/param.ini",
    "content": "[TaskOnKart]\nworkspace_directory=./resource\nlocal_temporary_directory=./resource/tmp\n\n[core]\nlogging_conf_file=logging.ini\n\n"
  },
  {
    "path": "gokart/__init__.py",
    "content": "__all__ = [\n    'build',\n    'WorkerSchedulerFactory',\n    'make_tree_info',\n    'tree_info',\n    'PandasTypeConfig',\n    'ExplicitBoolParameter',\n    'ListTaskInstanceParameter',\n    'SerializableParameter',\n    'TaskInstanceParameter',\n    'ZonedDateSecondParameter',\n    'run',\n    'TaskOnKart',\n    'test_run',\n    'make_task_info_as_tree_str',\n    'add_config',\n    'delete_local_unnecessary_outputs',\n]\n\nfrom gokart.build import WorkerSchedulerFactory, build\nfrom gokart.info import make_tree_info, tree_info\nfrom gokart.pandas_type_config import PandasTypeConfig\nfrom gokart.parameter import (\n    ExplicitBoolParameter,\n    ListTaskInstanceParameter,\n    SerializableParameter,\n    TaskInstanceParameter,\n    ZonedDateSecondParameter,\n)\nfrom gokart.run import run\nfrom gokart.task import TaskOnKart\nfrom gokart.testing import test_run\nfrom gokart.tree.task_info import make_task_info_as_tree_str\nfrom gokart.utils import add_config\nfrom gokart.workspace_management import delete_local_unnecessary_outputs\n"
  },
  {
    "path": "gokart/build.py",
    "content": "from __future__ import annotations\n\nimport enum\nimport io\nimport logging\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom logging import getLogger\nfrom typing import Any, Literal, Protocol, TypeVar, cast, overload\n\nimport backoff\nimport luigi\nfrom luigi import LuigiStatusCode, rpc, scheduler\n\nimport gokart\nimport gokart.tree.task_info\nfrom gokart import worker\nfrom gokart.conflict_prevention_lock.task_lock import TaskLockException\nfrom gokart.target import TargetOnKart\nfrom gokart.task import TaskOnKart\n\nT = TypeVar('T')\n\nlogger: logging.Logger = logging.getLogger(__name__)\n\n\nclass LoggerConfig:\n    def __init__(self, level: int):\n        self.logger = getLogger(__name__)\n        self.default_level = self.logger.level\n        self.level = level\n\n    def __enter__(self):\n        logging.disable(self.level - 10)  # subtract 10 to disable below self.level\n        self.logger.setLevel(self.level)\n        return self\n\n    def __exit__(self, exception_type, exception_value, traceback):\n        logging.disable(self.default_level - 10)  # subtract 10 to disable below self.level\n        self.logger.setLevel(self.default_level)\n\n\nclass GokartBuildError(Exception):\n    \"\"\"Raised when ``gokart.build`` failed. 
This exception contains raised exceptions in the task execution.\"\"\"\n\n    def __init__(self, message: str, raised_exceptions: dict[str, list[Exception]]) -> None:\n        super().__init__(message)\n        self.raised_exceptions = raised_exceptions\n\n\nclass HasLockedTaskException(Exception):\n    \"\"\"Raised when the task failed to acquire the lock in the task execution.\"\"\"\n\n\nclass TaskLockExceptionRaisedFlag:\n    def __init__(self):\n        self.flag: bool = False\n\n\nclass WorkerProtocol(Protocol):\n    \"\"\"Protocol for Worker.\n    This protocol is determined by luigi.worker.Worker.\n    \"\"\"\n\n    def add(self, task: TaskOnKart[Any]) -> bool: ...\n\n    def run(self) -> bool: ...\n\n    def __enter__(self) -> WorkerProtocol: ...\n\n    def __exit__(self, type: Any, value: Any, traceback: Any) -> Literal[False]: ...\n\n\nclass WorkerSchedulerFactory:\n    def create_local_scheduler(self) -> scheduler.Scheduler:\n        return scheduler.Scheduler(prune_on_get_work=True, record_task_history=False)\n\n    def create_remote_scheduler(self, url: str) -> rpc.RemoteScheduler:\n        return rpc.RemoteScheduler(url)\n\n    def create_worker(self, scheduler: scheduler.Scheduler, worker_processes: int, assistant: bool = False) -> WorkerProtocol:\n        return worker.Worker(scheduler=scheduler, worker_processes=worker_processes, assistant=assistant)\n\n\ndef _get_output(task: TaskOnKart[T]) -> T:\n    output = task.output()\n    # FIXME: currently, nested output is not supported\n    if isinstance(output, list) or isinstance(output, tuple):\n        return cast(T, [t.load() for t in output if isinstance(t, TargetOnKart)])\n    if isinstance(output, dict):\n        return cast(T, {k: t.load() for k, t in output.items() if isinstance(t, TargetOnKart)})\n    if isinstance(output, TargetOnKart):\n        return cast(T, output.load())\n    raise ValueError(f'output type is not supported: {type(output)}')\n\n\ndef _reset_register(keep={'gokart', 
'luigi'}):\n    \"\"\"reset luigi.task_register.Register._reg everytime gokart.build called to avoid TaskClassAmbigiousException\"\"\"\n    luigi.task_register.Register._reg = [\n        x\n        for x in luigi.task_register.Register._reg\n        if (\n            (x.__module__.split('.')[0] in keep)  # keep luigi and gokart\n            or (issubclass(x, gokart.PandasTypeConfig))\n        )  # PandasTypeConfig should be kept\n    ]\n\n\nclass TaskDumpMode(enum.Enum):\n    TREE = 'tree'\n    TABLE = 'table'\n    NONE = 'none'\n\n\nclass TaskDumpOutputType(enum.Enum):\n    PRINT = 'print'\n    DUMP = 'dump'\n    NONE = 'none'\n\n\n@dataclass\nclass TaskDumpConfig:\n    mode: TaskDumpMode = TaskDumpMode.NONE\n    output_type: TaskDumpOutputType = TaskDumpOutputType.NONE\n\n\ndef process_task_info(task: TaskOnKart[Any], task_dump_config: TaskDumpConfig = TaskDumpConfig()) -> None:\n    match task_dump_config:\n        case TaskDumpConfig(mode=TaskDumpMode.NONE, output_type=TaskDumpOutputType.NONE):\n            pass\n        case TaskDumpConfig(mode=TaskDumpMode.TREE, output_type=TaskDumpOutputType.PRINT):\n            tree = gokart.make_tree_info(task)\n            logger.info(tree)\n        case TaskDumpConfig(mode=TaskDumpMode.TABLE, output_type=TaskDumpOutputType.PRINT):\n            table = gokart.tree.task_info.make_task_info_as_table(task)\n            output = io.StringIO()\n            table.to_csv(output, index=False, sep='\\t')\n            output.seek(0)\n            logger.info(output.read())\n        case TaskDumpConfig(mode=TaskDumpMode.TREE, output_type=TaskDumpOutputType.DUMP):\n            tree = gokart.make_tree_info(task)\n            gokart.TaskOnKart().make_target(f'log/task_info/{type(task).__name__}.txt').dump(tree)\n        case TaskDumpConfig(mode=TaskDumpMode.TABLE, output_type=TaskDumpOutputType.DUMP):\n            table = gokart.tree.task_info.make_task_info_as_table(task)\n            
gokart.TaskOnKart().make_target(f'log/task_info/{type(task).__name__}.pkl').dump(table)\n        case _:\n            raise ValueError(f'Unsupported TaskDumpConfig: {task_dump_config}')\n\n\n@overload\ndef build(\n    task: TaskOnKart[T],\n    return_value: Literal[True] = True,\n    reset_register: bool = True,\n    log_level: int = logging.ERROR,\n    task_lock_exception_max_tries: int = 10,\n    task_lock_exception_max_wait_seconds: int = 600,\n    **env_params: Any,\n) -> T: ...\n\n\n@overload\ndef build(\n    task: TaskOnKart[T],\n    return_value: Literal[False],\n    reset_register: bool = True,\n    log_level: int = logging.ERROR,\n    task_lock_exception_max_tries: int = 10,\n    task_lock_exception_max_wait_seconds: int = 600,\n    **env_params: Any,\n) -> None: ...\n\n\ndef build(\n    task: TaskOnKart[T],\n    return_value: bool = True,\n    reset_register: bool = True,\n    log_level: int = logging.ERROR,\n    task_lock_exception_max_tries: int = 10,\n    task_lock_exception_max_wait_seconds: int = 600,\n    task_dump_config: TaskDumpConfig = TaskDumpConfig(),\n    **env_params: Any,\n) -> T | None:\n    \"\"\"\n    Run gokart task for local interpreter.\n    Sharing the most of its parameters with luigi.build (see https://luigi.readthedocs.io/en/stable/api/luigi.html?highlight=build#luigi.build)\n    \"\"\"\n    if reset_register:\n        _reset_register()\n    with LoggerConfig(level=log_level):\n        log_handler_before_run = logging.StreamHandler()\n        logger.addHandler(log_handler_before_run)\n        process_task_info(task, task_dump_config)\n        logger.removeHandler(log_handler_before_run)\n        log_handler_before_run.close()\n\n        task_lock_exception_raised = TaskLockExceptionRaisedFlag()\n        raised_exceptions: dict[str, list[Exception]] = dict()\n\n        @TaskOnKart.event_handler(luigi.Event.FAILURE)\n        def when_failure(task, exception):\n            if isinstance(exception, TaskLockException):\n                
task_lock_exception_raised.flag = True\n            else:\n                raised_exceptions.setdefault(task.make_unique_id(), []).append(exception)\n\n        @backoff.on_exception(\n            partial(backoff.expo, max_value=task_lock_exception_max_wait_seconds), HasLockedTaskException, max_tries=task_lock_exception_max_tries\n        )\n        def _build_task():\n            task_lock_exception_raised.flag = False\n            result = luigi.build(\n                [task],\n                worker_scheduler_factory=WorkerSchedulerFactory(),\n                local_scheduler=True,\n                detailed_summary=True,\n                log_level=logging.getLevelName(log_level),\n                **env_params,\n            )\n            if task_lock_exception_raised.flag:\n                raise HasLockedTaskException()\n            if result.status in (LuigiStatusCode.FAILED, LuigiStatusCode.FAILED_AND_SCHEDULING_FAILED, LuigiStatusCode.SCHEDULING_FAILED):\n                raise GokartBuildError(result.summary_text, raised_exceptions=raised_exceptions)\n            return _get_output(task) if return_value else None\n\n        return cast(T | None, _build_task())\n"
  },
  {
    "path": "gokart/config_params.py",
    "content": "from __future__ import annotations\n\nfrom typing import Any\n\nimport luigi\n\nimport gokart\n\n\nclass inherits_config_params:\n    def __init__(self, config_class: type[luigi.Config], parameter_alias: dict[str, str] | None = None):\n        \"\"\"\n        Decorates task to inherit parameter value of `config_class`.\n\n        * config_class: Inherit parameter value of this task to decorated task. Only parameter values exist in both tasks are inherited.\n        * parameter_alias: Dictionary to map paramter names between config_class task and decorated task.\n                           key: config_class's parameter name. value: decorated task's parameter name.\n        \"\"\"\n\n        self._config_class: type[luigi.Config] = config_class\n        self._parameter_alias: dict[str, str] = parameter_alias if parameter_alias is not None else {}\n\n    def __call__(self, task_class: type[gokart.TaskOnKart[Any]]) -> type[gokart.TaskOnKart[Any]]:\n        # wrap task to prevent task name from being changed\n        @luigi.task._task_wraps(task_class)\n        class Wrapped(task_class):  # type: ignore\n            @classmethod\n            def get_param_values(cls, params, args, kwargs):\n                for param_key, param_value in self._config_class().param_kwargs.items():\n                    task_param_key = self._parameter_alias.get(param_key, param_key)\n\n                    if hasattr(cls, task_param_key) and task_param_key not in kwargs:\n                        kwargs[task_param_key] = param_value\n                return super().get_param_values(params, args, kwargs)\n\n        return Wrapped\n"
  },
  {
    "path": "gokart/conflict_prevention_lock/task_lock.py",
    "content": "from __future__ import annotations\n\nimport functools\nimport os\nfrom logging import getLogger\nfrom typing import Any, NamedTuple\n\nimport redis\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\nlogger = getLogger(__name__)\n\n\nclass TaskLockParams(NamedTuple):\n    redis_host: str | None\n    redis_port: int | None\n    redis_timeout: int | None\n    redis_key: str\n    should_task_lock: bool\n    raise_task_lock_exception_on_collision: bool\n    lock_extend_seconds: int\n\n\nclass TaskLockException(Exception):\n    pass\n    \"\"\"Raised when the task failed to acquire the lock in the task execution. Only used internally.\"\"\"\n\n\nclass RedisClient:\n    _instances: dict[Any, Any] = {}\n\n    def __new__(cls, *args, **kwargs):\n        key = (args, tuple(sorted(kwargs.items())))\n        if cls not in cls._instances:\n            cls._instances[cls] = {}\n        if key not in cls._instances[cls]:\n            cls._instances[cls][key] = super().__new__(cls)\n        return cls._instances[cls][key]\n\n    def __init__(self, host: str | None, port: int | None) -> None:\n        if not hasattr(self, '_redis_client'):\n            host = host or 'localhost'\n            port = port or 6379\n            self._redis_client = redis.Redis(host=host, port=port)\n\n    def get_redis_client(self):\n        return self._redis_client\n\n\ndef _extend_lock(task_lock: redis.lock.Lock, redis_timeout: int) -> None:\n    task_lock.extend(additional_time=redis_timeout, replace_ttl=True)\n\n\ndef set_task_lock(task_lock_params: TaskLockParams) -> redis.lock.Lock:\n    redis_client = RedisClient(host=task_lock_params.redis_host, port=task_lock_params.redis_port).get_redis_client()\n    blocking = not task_lock_params.raise_task_lock_exception_on_collision\n    task_lock = redis.lock.Lock(redis=redis_client, name=task_lock_params.redis_key, timeout=task_lock_params.redis_timeout, thread_local=False)\n    if not 
task_lock.acquire(blocking=blocking):\n        raise TaskLockException('Lock already taken by other task.')\n    return task_lock\n\n\ndef set_lock_scheduler(task_lock: redis.lock.Lock, task_lock_params: TaskLockParams) -> BackgroundScheduler:\n    scheduler = BackgroundScheduler()\n    extend_lock = functools.partial(_extend_lock, task_lock=task_lock, redis_timeout=task_lock_params.redis_timeout or 0)\n    scheduler.add_job(\n        extend_lock,\n        'interval',\n        seconds=task_lock_params.lock_extend_seconds,\n        max_instances=999999999,\n        misfire_grace_time=task_lock_params.redis_timeout,\n        coalesce=False,\n    )\n    scheduler.start()\n    return scheduler\n\n\ndef make_task_lock_key(file_path: str, unique_id: str | None) -> str:\n    basename_without_ext = os.path.splitext(os.path.basename(file_path))[0]\n    return f'{basename_without_ext}_{unique_id}'\n\n\ndef make_task_lock_params(\n    file_path: str,\n    unique_id: str | None,\n    redis_host: str | None = None,\n    redis_port: int | None = None,\n    redis_timeout: int | None = None,\n    raise_task_lock_exception_on_collision: bool = False,\n    lock_extend_seconds: int = 10,\n) -> TaskLockParams:\n    redis_key = make_task_lock_key(file_path, unique_id)\n    should_task_lock = redis_host is not None and redis_port is not None\n    if redis_timeout is not None:\n        assert redis_timeout > lock_extend_seconds, f'`redis_timeout` must be set greater than lock_extend_seconds:{lock_extend_seconds}, not {redis_timeout}.'\n    task_lock_params = TaskLockParams(\n        redis_host=redis_host,\n        redis_port=redis_port,\n        redis_key=redis_key,\n        should_task_lock=should_task_lock,\n        redis_timeout=redis_timeout,\n        raise_task_lock_exception_on_collision=raise_task_lock_exception_on_collision,\n        lock_extend_seconds=lock_extend_seconds,\n    )\n    return task_lock_params\n\n\ndef make_task_lock_params_for_run(task_self: Any, 
lock_extend_seconds: int = 10) -> TaskLockParams:\n    task_path_name = os.path.join(task_self.__module__.replace('.', '/'), f'{type(task_self).__name__}')\n    unique_id = task_self.make_unique_id() + '-run'\n    task_lock_key = make_task_lock_key(file_path=task_path_name, unique_id=unique_id)\n\n    should_task_lock = task_self.redis_host is not None and task_self.redis_port is not None\n    return TaskLockParams(\n        redis_host=task_self.redis_host,\n        redis_port=task_self.redis_port,\n        redis_key=task_lock_key,\n        should_task_lock=should_task_lock,\n        redis_timeout=task_self.redis_timeout,\n        raise_task_lock_exception_on_collision=True,\n        lock_extend_seconds=lock_extend_seconds,\n    )\n"
  },
  {
    "path": "gokart/conflict_prevention_lock/task_lock_wrappers.py",
    "content": "from __future__ import annotations\n\nimport functools\nfrom collections.abc import Callable\nfrom logging import getLogger\nfrom typing import ParamSpec, TypeVar\n\nfrom gokart.conflict_prevention_lock.task_lock import TaskLockParams, set_lock_scheduler, set_task_lock\n\nlogger = getLogger(__name__)\n\n\nP = ParamSpec('P')\nR = TypeVar('R')\n\n\ndef wrap_dump_with_lock(func: Callable[P, R], task_lock_params: TaskLockParams, exist_check: Callable[..., bool]) -> Callable[P, R | None]:\n    \"\"\"Redis lock wrapper function for TargetOnKart.dump().\n    When TargetOnKart.dump() is called, dump() will be wrapped with redis lock and cache existance check.\n    https://github.com/m3dev/gokart/issues/265\n    \"\"\"\n\n    if not task_lock_params.should_task_lock:\n        return func\n\n    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R | None:\n        task_lock = set_task_lock(task_lock_params=task_lock_params)\n        scheduler = set_lock_scheduler(task_lock=task_lock, task_lock_params=task_lock_params)\n\n        try:\n            logger.debug(f'Task DUMP lock of {task_lock_params.redis_key} locked.')\n            if not exist_check():\n                return func(*args, **kwargs)\n            return None\n        finally:\n            logger.debug(f'Task DUMP lock of {task_lock_params.redis_key} released.')\n            task_lock.release()\n            scheduler.shutdown()\n\n    return wrapper\n\n\ndef wrap_load_with_lock(func: Callable[P, R], task_lock_params: TaskLockParams) -> Callable[P, R]:\n    \"\"\"Redis lock wrapper function for TargetOnKart.load().\n    When TargetOnKart.load() is called, redis lock will be locked and released before load().\n    https://github.com/m3dev/gokart/issues/265\n    \"\"\"\n\n    if not task_lock_params.should_task_lock:\n        return func\n\n    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:\n        task_lock = set_task_lock(task_lock_params=task_lock_params)\n        scheduler = 
set_lock_scheduler(task_lock=task_lock, task_lock_params=task_lock_params)\n\n        logger.debug(f'Task LOAD lock of {task_lock_params.redis_key} locked.')\n        task_lock.release()\n        logger.debug(f'Task LOAD lock of {task_lock_params.redis_key} released.')\n        scheduler.shutdown()\n        result = func(*args, **kwargs)\n        return result\n\n    return wrapper\n\n\ndef wrap_remove_with_lock(func: Callable[P, R], task_lock_params: TaskLockParams) -> Callable[P, R]:\n    \"\"\"Redis lock wrapper function for TargetOnKart.remove().\n    When TargetOnKart.remove() is called, remove() will be simply wrapped with redis lock.\n    https://github.com/m3dev/gokart/issues/265\n    \"\"\"\n    if not task_lock_params.should_task_lock:\n        return func\n\n    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:\n        task_lock = set_task_lock(task_lock_params=task_lock_params)\n        scheduler = set_lock_scheduler(task_lock=task_lock, task_lock_params=task_lock_params)\n\n        try:\n            logger.debug(f'Task REMOVE lock of {task_lock_params.redis_key} locked.')\n            result = func(*args, **kwargs)\n            task_lock.release()\n            logger.debug(f'Task REMOVE lock of {task_lock_params.redis_key} released.')\n            scheduler.shutdown()\n            return result\n        except BaseException as e:\n            logger.debug(f'Task REMOVE lock of {task_lock_params.redis_key} released with BaseException.')\n            task_lock.release()\n            scheduler.shutdown()\n            raise e\n\n    return wrapper\n\n\ndef wrap_run_with_lock(run_func: Callable[[], R], task_lock_params: TaskLockParams) -> Callable[[], R]:\n    @functools.wraps(run_func)\n    def wrapped():\n        task_lock = set_task_lock(task_lock_params=task_lock_params)\n        scheduler = set_lock_scheduler(task_lock=task_lock, task_lock_params=task_lock_params)\n\n        try:\n            logger.debug(f'Task RUN lock of 
{task_lock_params.redis_key} locked.')\n            result = run_func()\n            task_lock.release()\n            logger.debug(f'Task RUN lock of {task_lock_params.redis_key} released.')\n            scheduler.shutdown()\n            return result\n        except BaseException as e:\n            logger.debug(f'Task RUN lock of {task_lock_params.redis_key} released with BaseException.')\n            task_lock.release()\n            scheduler.shutdown()\n            raise e\n\n    return wrapped\n"
  },
  {
    "path": "gokart/errors/__init__.py",
    "content": "from gokart.build import GokartBuildError, HasLockedTaskException\nfrom gokart.pandas_type_config import PandasTypeError\nfrom gokart.task import EmptyDumpError\n\n__all__ = [\n    'GokartBuildError',\n    'HasLockedTaskException',\n    'PandasTypeError',\n    'EmptyDumpError',\n]\n"
  },
  {
    "path": "gokart/file_processor/__init__.py",
    "content": "\"\"\"File processor module with support for multiple DataFrame backends.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nfrom typing import Any, Literal\n\n# Export common processors and types from base\nfrom gokart.file_processor.base import (\n    BinaryFileProcessor,\n    DataFrameType,\n    FileProcessor,\n    GzipFileProcessor,\n    NpzFileProcessor,\n    PickleFileProcessor,\n    TextFileProcessor,\n    XmlFileProcessor,\n)\n\n# Import backend-specific implementations\nfrom gokart.file_processor.pandas import (\n    CsvFileProcessorPandas,\n    FeatherFileProcessorPandas,\n    JsonFileProcessorPandas,\n    ParquetFileProcessorPandas,\n)\nfrom gokart.file_processor.polars import (\n    CsvFileProcessorPolars,\n    FeatherFileProcessorPolars,\n    JsonFileProcessorPolars,\n    ParquetFileProcessorPolars,\n)\n\n\nclass CsvFileProcessor(FileProcessor):\n    \"\"\"CSV file processor with automatic backend selection based on dataframe_type.\"\"\"\n\n    def __init__(self, sep: str = ',', encoding: str = 'utf-8', dataframe_type: DataFrameType = 'pandas') -> None:\n        \"\"\"\n        CSV file processor with support for both pandas and polars DataFrames.\n\n        Args:\n            sep: CSV delimiter (default: ',')\n            encoding: File encoding (default: 'utf-8')\n            dataframe_type: DataFrame library to use for load() - 'pandas', 'polars', or 'polars-lazy' (default: 'pandas')\n        \"\"\"\n        self._sep = sep\n        self._encoding = encoding\n        self._dataframe_type = dataframe_type  # Store for tests\n\n        if dataframe_type == 'polars-lazy':\n            self._impl: FileProcessor = CsvFileProcessorPolars(sep=sep, encoding=encoding, lazy=True)\n        elif dataframe_type == 'polars':\n            self._impl = CsvFileProcessorPolars(sep=sep, encoding=encoding, lazy=False)\n        else:\n            self._impl = CsvFileProcessorPandas(sep=sep, encoding=encoding)\n\n    def format(self):\n        
return self._impl.format()\n\n    def load(self, file):\n        return self._impl.load(file)\n\n    def dump(self, obj, file):\n        return self._impl.dump(obj, file)\n\n\nclass JsonFileProcessor(FileProcessor):\n    \"\"\"JSON file processor with automatic backend selection based on dataframe_type.\"\"\"\n\n    def __init__(self, orient: Literal['split', 'records', 'index', 'table', 'columns', 'values'] | None = None, dataframe_type: DataFrameType = 'pandas'):\n        \"\"\"\n        JSON file processor with support for both pandas and polars DataFrames.\n\n        Args:\n            orient: JSON orientation. 'records' for newline-delimited JSON.\n            dataframe_type: DataFrame library to use for load() - 'pandas', 'polars', or 'polars-lazy' (default: 'pandas')\n        \"\"\"\n        self._orient = orient\n        self._dataframe_type = dataframe_type  # Store for tests\n\n        if dataframe_type == 'polars-lazy':\n            self._impl: FileProcessor = JsonFileProcessorPolars(orient=orient, lazy=True)\n        elif dataframe_type == 'polars':\n            self._impl = JsonFileProcessorPolars(orient=orient, lazy=False)\n        else:\n            self._impl = JsonFileProcessorPandas(orient=orient)\n\n    def format(self):\n        return self._impl.format()\n\n    def load(self, file):\n        return self._impl.load(file)\n\n    def dump(self, obj, file):\n        return self._impl.dump(obj, file)\n\n\nclass ParquetFileProcessor(FileProcessor):\n    \"\"\"Parquet file processor with automatic backend selection based on dataframe_type.\"\"\"\n\n    def __init__(self, engine: Any = 'pyarrow', compression: Any = None, dataframe_type: DataFrameType = 'pandas') -> None:\n        \"\"\"\n        Parquet file processor with support for both pandas and polars DataFrames.\n\n        Args:\n            engine: Parquet engine (pandas-specific, ignored for polars).\n            compression: Compression type.\n            dataframe_type: DataFrame library to 
use for load() - 'pandas', 'polars', or 'polars-lazy' (default: 'pandas')\n        \"\"\"\n        self._engine = engine\n        self._compression = compression\n        self._dataframe_type = dataframe_type  # Store for tests\n\n        if dataframe_type == 'polars-lazy':\n            self._impl: FileProcessor = ParquetFileProcessorPolars(engine=engine, compression=compression, lazy=True)\n        elif dataframe_type == 'polars':\n            self._impl = ParquetFileProcessorPolars(engine=engine, compression=compression, lazy=False)\n        else:\n            self._impl = ParquetFileProcessorPandas(engine=engine, compression=compression)\n\n    def format(self):\n        return self._impl.format()\n\n    def load(self, file):\n        return self._impl.load(file)\n\n    def dump(self, obj, file):\n        # Use the configured implementation (pandas by default)\n        return self._impl.dump(obj, file)\n\n\nclass FeatherFileProcessor(FileProcessor):\n    \"\"\"Feather file processor with automatic backend selection based on dataframe_type.\"\"\"\n\n    def __init__(self, store_index_in_feather: bool, dataframe_type: DataFrameType = 'pandas'):\n        \"\"\"\n        Feather file processor with support for both pandas and polars DataFrames.\n\n        Args:\n            store_index_in_feather: Whether to store pandas index (pandas-only feature).\n            dataframe_type: DataFrame library to use for load() - 'pandas', 'polars', or 'polars-lazy' (default: 'pandas')\n        \"\"\"\n        self._store_index_in_feather = store_index_in_feather\n        self._dataframe_type = dataframe_type  # Store for tests\n\n        if dataframe_type == 'polars-lazy':\n            self._impl: FileProcessor = FeatherFileProcessorPolars(store_index_in_feather=store_index_in_feather, lazy=True)\n        elif dataframe_type == 'polars':\n            self._impl = FeatherFileProcessorPolars(store_index_in_feather=store_index_in_feather, lazy=False)\n        else:\n            
self._impl = FeatherFileProcessorPandas(store_index_in_feather=store_index_in_feather)\n\n    def format(self):\n        return self._impl.format()\n\n    def load(self, file):\n        return self._impl.load(file)\n\n    def dump(self, obj, file):\n        # Use the configured implementation (pandas by default)\n        return self._impl.dump(obj, file)\n\n\ndef make_file_processor(file_path: str, store_index_in_feather: bool = True, *, dataframe_type: DataFrameType = 'pandas') -> FileProcessor:\n    \"\"\"Create a file processor based on file extension with default parameters.\"\"\"\n    extension2processor = {\n        '.txt': TextFileProcessor(),\n        '.ini': TextFileProcessor(),\n        '.csv': CsvFileProcessor(sep=',', dataframe_type=dataframe_type),\n        '.tsv': CsvFileProcessor(sep='\\t', dataframe_type=dataframe_type),\n        '.pkl': PickleFileProcessor(),\n        '.gz': GzipFileProcessor(),\n        '.json': JsonFileProcessor(dataframe_type=dataframe_type),\n        '.ndjson': JsonFileProcessor(dataframe_type=dataframe_type, orient='records'),\n        '.xml': XmlFileProcessor(),\n        '.npz': NpzFileProcessor(),\n        '.parquet': ParquetFileProcessor(compression='gzip', dataframe_type=dataframe_type),\n        '.feather': FeatherFileProcessor(store_index_in_feather=store_index_in_feather, dataframe_type=dataframe_type),\n        '.png': BinaryFileProcessor(),\n        '.jpg': BinaryFileProcessor(),\n    }\n\n    extension = os.path.splitext(file_path)[1]\n    assert extension in extension2processor, f'{extension} is not supported. 
The supported extensions are {list(extension2processor.keys())}.'\n    return extension2processor[extension]\n\n\n__all__ = [\n    # Base classes and types\n    'FileProcessor',\n    'DataFrameType',\n    # Common processors\n    'BinaryFileProcessor',\n    'PickleFileProcessor',\n    'TextFileProcessor',\n    'GzipFileProcessor',\n    'XmlFileProcessor',\n    'NpzFileProcessor',\n    # DataFrame processors (with factory pattern)\n    'CsvFileProcessor',\n    'JsonFileProcessor',\n    'ParquetFileProcessor',\n    'FeatherFileProcessor',\n    # Utility functions\n    'make_file_processor',\n]\n"
  },
  {
    "path": "gokart/file_processor/base.py",
    "content": "from __future__ import annotations\n\nimport xml.etree.ElementTree as ET\nfrom abc import abstractmethod\nfrom io import BytesIO\nfrom logging import getLogger\nfrom typing import Any, Literal, cast\n\nimport dill\nimport luigi\nimport luigi.format\nimport numpy as np\n\nfrom gokart.utils import load_dill_with_pandas_backward_compatibility\n\nlogger = getLogger(__name__)\n\n# Type alias for DataFrame library return type\nDataFrameType = Literal['pandas', 'polars', 'polars-lazy']\n\n\nclass FileProcessor:\n    @abstractmethod\n    def format(self) -> Any: ...\n\n    @abstractmethod\n    def load(self, file: Any) -> Any: ...\n\n    @abstractmethod\n    def dump(self, obj: Any, file: Any) -> None: ...\n\n\nclass BinaryFileProcessor(FileProcessor):\n    \"\"\"\n    Pass bytes to this processor\n\n    ```\n    figure_binary = io.BytesIO()\n    plt.savefig(figure_binary)\n    figure_binary.seek(0)\n    BinaryFileProcessor().dump(figure_binary.read())\n    ```\n    \"\"\"\n\n    def format(self):\n        return luigi.format.Nop\n\n    def load(self, file):\n        return file.read()\n\n    def dump(self, obj, file):\n        file.write(obj)\n\n\nclass _ChunkedLargeFileReader:\n    def __init__(self, file: Any) -> None:\n        self._file = file\n\n    def __getattr__(self, item):\n        return getattr(self._file, item)\n\n    def read(self, n: int) -> bytes:\n        if n >= (1 << 31):\n            logger.info(f'reading a large file with total_bytes={n}.')\n            buffer = bytearray(n)\n            idx = 0\n            while idx < n:\n                batch_size = min(n - idx, (1 << 31) - 1)\n                logger.info(f'reading bytes [{idx}, {idx + batch_size})...')\n                buffer[idx : idx + batch_size] = self._file.read(batch_size)\n                idx += batch_size\n            logger.info('done.')\n            return bytes(buffer)\n        return cast(bytes, self._file.read(n))\n\n    def readline(self) -> bytes:\n        return 
cast(bytes, self._file.readline())\n\n    def seek(self, offset: int) -> None:\n        self._file.seek(offset)\n\n    def seekable(self) -> bool:\n        return cast(bool, self._file.seekable())\n\n\nclass PickleFileProcessor(FileProcessor):\n    def format(self):\n        return luigi.format.Nop\n\n    def load(self, file):\n        if not file.seekable():\n            # load_dill_with_pandas_backward_compatibility() requires file with seek() and readlines() implemented.\n            # Therefore, we need to wrap with BytesIO which makes file seekable and readlinesable.\n            # For example, ReadableS3File is not a seekable file.\n            return load_dill_with_pandas_backward_compatibility(BytesIO(file.read()))\n        return load_dill_with_pandas_backward_compatibility(_ChunkedLargeFileReader(file))\n\n    def dump(self, obj, file):\n        self._write(dill.dumps(obj, protocol=4), file)\n\n    @staticmethod\n    def _write(buffer, file):\n        n = len(buffer)\n        idx = 0\n        while idx < n:\n            logger.info(f'writing a file with total_bytes={n}...')\n            batch_size = min(n - idx, (1 << 31) - 1)\n            logger.info(f'writing bytes [{idx}, {idx + batch_size})')\n            file.write(buffer[idx : idx + batch_size])\n            idx += batch_size\n        logger.info('done')\n\n\nclass TextFileProcessor(FileProcessor):\n    def format(self):\n        return None\n\n    def load(self, file):\n        return [s.rstrip() for s in file.readlines()]\n\n    def dump(self, obj, file):\n        if isinstance(obj, list):\n            for x in obj:\n                file.write(str(x) + '\\n')\n        else:\n            file.write(str(obj))\n\n\nclass GzipFileProcessor(FileProcessor):\n    def format(self):\n        return luigi.format.Gzip\n\n    def load(self, file):\n        return [s.rstrip().decode() for s in file.readlines()]\n\n    def dump(self, obj, file):\n        if isinstance(obj, list):\n            for x in obj:\n    
            file.write((str(x) + '\\n').encode())\n        else:\n            file.write(str(obj).encode())\n\n\nclass XmlFileProcessor(FileProcessor):\n    def format(self):\n        return None\n\n    def load(self, file):\n        try:\n            return ET.parse(file)\n        except ET.ParseError:\n            return ET.ElementTree()\n\n    def dump(self, obj, file):\n        assert isinstance(obj, ET.ElementTree), f'requires ET.ElementTree, but {type(obj)} is passed.'\n        obj.write(file)\n\n\nclass NpzFileProcessor(FileProcessor):\n    def format(self):\n        return luigi.format.Nop\n\n    def load(self, file):\n        return np.load(file)['data']\n\n    def dump(self, obj, file):\n        assert isinstance(obj, np.ndarray), f'requires np.ndarray, but {type(obj)} is passed.'\n        np.savez_compressed(file, data=obj)\n"
  },
  {
    "path": "gokart/file_processor/pandas.py",
    "content": "\"\"\"Pandas-specific file processor implementations.\"\"\"\n\nfrom __future__ import annotations\n\nfrom io import BytesIO\nfrom typing import Literal\n\nimport luigi\nimport luigi.format\nimport pandas as pd\nfrom luigi.format import TextFormat\n\nfrom gokart.file_processor.base import FileProcessor\nfrom gokart.object_storage import ObjectStorage\n\n\nclass CsvFileProcessorPandas(FileProcessor):\n    \"\"\"CSV file processor for pandas DataFrames.\"\"\"\n\n    def __init__(self, sep: str = ',', encoding: str = 'utf-8') -> None:\n        self._sep = sep\n        self._encoding = encoding\n        super().__init__()\n\n    def format(self):\n        return TextFormat(encoding=self._encoding)\n\n    def load(self, file):\n        try:\n            return pd.read_csv(file, sep=self._sep, encoding=self._encoding)\n        except pd.errors.EmptyDataError:\n            return pd.DataFrame()\n\n    def dump(self, obj, file):\n        if not isinstance(obj, pd.DataFrame | pd.Series):\n            raise TypeError(f'requires pd.DataFrame or pd.Series, but {type(obj)} is passed.')\n        obj.to_csv(file, mode='wt', index=False, sep=self._sep, header=True, encoding=self._encoding)\n\n\n_JsonOrient = Literal['split', 'records', 'index', 'table', 'columns', 'values']\n\n\nclass JsonFileProcessorPandas(FileProcessor):\n    \"\"\"JSON file processor for pandas DataFrames.\"\"\"\n\n    def __init__(self, orient: _JsonOrient | None = None):\n        self._orient: _JsonOrient | None = orient\n\n    def format(self):\n        return luigi.format.Nop\n\n    def load(self, file):\n        try:\n            return pd.read_json(file, orient=self._orient, lines=True if self._orient == 'records' else False)\n        except pd.errors.EmptyDataError:\n            return pd.DataFrame()\n\n    def dump(self, obj, file):\n        if isinstance(obj, dict):\n            obj = pd.DataFrame.from_dict(obj)\n        if not isinstance(obj, pd.DataFrame | pd.Series):\n            
raise TypeError(f'requires pd.DataFrame or pd.Series or dict, but {type(obj)} is passed.')\n        obj.to_json(file, orient=self._orient, lines=True if self._orient == 'records' else False)\n\n\nclass ParquetFileProcessorPandas(FileProcessor):\n    \"\"\"Parquet file processor for pandas DataFrames.\"\"\"\n\n    def __init__(self, engine: Literal['auto', 'pyarrow', 'fastparquet'] = 'pyarrow', compression: str | None = None) -> None:\n        self._engine: Literal['auto', 'pyarrow', 'fastparquet'] = engine\n        self._compression = compression\n        super().__init__()\n\n    def format(self):\n        return luigi.format.Nop\n\n    def load(self, file):\n        # FIXME(mamo3gr): enable streaming (chunked) read with S3.\n        # pandas.read_parquet accepts file-like object\n        # but file (luigi.contrib.s3.ReadableS3File) should have 'tell' method,\n        # which is needed for pandas to read a file in chunks.\n        if ObjectStorage.is_buffered_reader(file):\n            return pd.read_parquet(file.name)\n        else:\n            return pd.read_parquet(BytesIO(file.read()))\n\n    def dump(self, obj, file):\n        if not isinstance(obj, pd.DataFrame):\n            raise TypeError(f'requires pd.DataFrame, but {type(obj)} is passed.')\n        # MEMO: to_parquet only supports a filepath as string (not a file handle)\n        obj.to_parquet(file.name, index=False, engine=self._engine, compression=self._compression)\n\n\nclass FeatherFileProcessorPandas(FileProcessor):\n    \"\"\"Feather file processor for pandas DataFrames.\"\"\"\n\n    def __init__(self, store_index_in_feather: bool):\n        super().__init__()\n        self._store_index_in_feather = store_index_in_feather\n        self.INDEX_COLUMN_PREFIX = '__feather_gokart_index__'\n\n    def format(self):\n        return luigi.format.Nop\n\n    def load(self, file):\n        # FIXME(mamo3gr): enable streaming (chunked) read with S3.\n        # pandas.read_feather accepts file-like object\n    
    # but file (luigi.contrib.s3.ReadableS3File) should have 'tell' method,\n        # which is needed for pandas to read a file in chunks.\n        if ObjectStorage.is_buffered_reader(file):\n            loaded_df = pd.read_feather(file.name)\n        else:\n            loaded_df = pd.read_feather(BytesIO(file.read()))\n\n        if self._store_index_in_feather:\n            if any(col.startswith(self.INDEX_COLUMN_PREFIX) for col in loaded_df.columns):\n                index_columns = [col_name for col_name in loaded_df.columns[::-1] if col_name[: len(self.INDEX_COLUMN_PREFIX)] == self.INDEX_COLUMN_PREFIX]\n                index_column = index_columns[0]\n                index_name = index_column[len(self.INDEX_COLUMN_PREFIX) :]\n                if index_name == 'None':\n                    index_name = None\n                loaded_df.index = pd.Index(loaded_df[index_column].values, name=index_name)\n                loaded_df = loaded_df.drop(columns=[index_column])\n\n        return loaded_df\n\n    def dump(self, obj, file):\n        if not isinstance(obj, pd.DataFrame):\n            raise TypeError(f'requires pd.DataFrame, but {type(obj)} is passed.')\n\n        dump_obj = obj.copy()\n\n        if self._store_index_in_feather:\n            index_column_name = f'{self.INDEX_COLUMN_PREFIX}{dump_obj.index.name}'\n            assert index_column_name not in dump_obj.columns, (\n                f'column name {index_column_name} already exists in dump_obj. \\nConsider not saving index by setting store_index_in_feather=False.'\n            )\n            assert dump_obj.index.name != 'None', 'index name is \"None\", which is not allowed in gokart. Consider setting another index name.'\n\n            dump_obj[index_column_name] = dump_obj.index\n            dump_obj = dump_obj.reset_index(drop=True)\n\n        # to_feather supports \"binary\" file-like object, but file variable is text\n        dump_obj.to_feather(file.name)\n"
  },
  {
    "path": "gokart/file_processor/polars.py",
    "content": "\"\"\"Polars-specific file processor implementations.\"\"\"\n\nfrom __future__ import annotations\n\nfrom io import BytesIO\nfrom typing import TYPE_CHECKING, Literal\n\nimport luigi\nimport luigi.format\nfrom luigi.format import TextFormat\n\nfrom gokart.file_processor.base import FileProcessor\nfrom gokart.object_storage import ObjectStorage\n\n_CsvEncoding = Literal['utf8', 'utf8-lossy']\n_ParquetCompression = Literal['lz4', 'uncompressed', 'snappy', 'gzip', 'brotli', 'zstd']\n\ntry:\n    import polars as pl\n\n    HAS_POLARS = True\nexcept ImportError:\n    HAS_POLARS = False\n\nif TYPE_CHECKING:\n    import polars as pl\n\n\nclass CsvFileProcessorPolars(FileProcessor):\n    \"\"\"CSV file processor for polars DataFrames.\"\"\"\n\n    def __init__(self, sep: str = ',', encoding: str = 'utf-8', lazy: bool = False) -> None:\n        if not HAS_POLARS:\n            raise ImportError(\"polars is required for polars-based dataframe types ('polars' or 'polars-lazy'). Install with: pip install polars\")\n        self._sep = sep\n        self._encoding = encoding\n        self._lazy = lazy\n        super().__init__()\n\n    def format(self):\n        return TextFormat(encoding=self._encoding)\n\n    def load(self, file):\n        try:\n            # scan_csv/read_csv only support 'utf8' and 'utf8-lossy'\n            encoding: _CsvEncoding = 'utf8' if self._encoding in ('utf-8', 'utf8') else 'utf8-lossy'\n            if self._lazy:\n                # scan_csv requires a file path, not a file object\n                return pl.scan_csv(file.name, separator=self._sep, encoding=encoding)\n            return pl.read_csv(file, separator=self._sep, encoding=encoding)\n        except Exception as e:\n            # Handle empty data gracefully\n            if 'empty' in str(e).lower() or 'no data' in str(e).lower():\n                return pl.LazyFrame() if self._lazy else pl.DataFrame()\n            raise\n\n    def dump(self, obj, file):\n        if 
isinstance(obj, pl.LazyFrame):\n            obj = obj.collect()\n        if not isinstance(obj, pl.DataFrame):\n            raise TypeError(f'requires pl.DataFrame or pl.LazyFrame, but {type(obj)} is passed.')\n        obj.write_csv(file, separator=self._sep, include_header=True)\n\n\nclass JsonFileProcessorPolars(FileProcessor):\n    \"\"\"JSON file processor for polars DataFrames.\"\"\"\n\n    def __init__(self, orient: str | None = None, lazy: bool = False):\n        if not HAS_POLARS:\n            raise ImportError(\"polars is required for polars-based dataframe types ('polars' or 'polars-lazy'). Install with: pip install polars\")\n        self._orient = orient\n        self._lazy = lazy\n\n    def format(self):\n        return luigi.format.Nop\n\n    def load(self, file):\n        try:\n            if self._orient == 'records':\n                if self._lazy:\n                    return pl.scan_ndjson(file)\n                return pl.read_ndjson(file)\n            else:\n                # polars doesn't have scan_json, so we read and convert if lazy\n                df = pl.read_json(file)\n                return df.lazy() if self._lazy else df\n        except Exception as e:\n            # Handle empty files\n            if 'empty' in str(e).lower() or 'no data' in str(e).lower():\n                return pl.LazyFrame() if self._lazy else pl.DataFrame()\n            raise\n\n    def dump(self, obj, file):\n        if isinstance(obj, pl.LazyFrame):\n            obj = obj.collect()\n        if not isinstance(obj, pl.DataFrame):\n            raise TypeError(f'requires pl.DataFrame or pl.LazyFrame, but {type(obj)} is passed.')\n        if self._orient == 'records':\n            obj.write_ndjson(file)\n        else:\n            obj.write_json(file)\n\n\nclass ParquetFileProcessorPolars(FileProcessor):\n    \"\"\"Parquet file processor for polars DataFrames.\"\"\"\n\n    def __init__(self, engine: str = 'pyarrow', compression: _ParquetCompression | None = None, 
lazy: bool = False) -> None:\n        if not HAS_POLARS:\n            raise ImportError(\"polars is required for polars-based dataframe types ('polars' or 'polars-lazy'). Install with: pip install polars\")\n        self._engine = engine  # Ignored for polars\n        self._compression: _ParquetCompression | None = compression\n        self._lazy = lazy\n        super().__init__()\n\n    def format(self):\n        return luigi.format.Nop\n\n    def load(self, file):\n        # polars.read_parquet can handle file paths or file-like objects\n        if ObjectStorage.is_buffered_reader(file):\n            if self._lazy:\n                return pl.scan_parquet(file.name)\n            return pl.read_parquet(file.name)\n        else:\n            data = BytesIO(file.read())\n            if self._lazy:\n                # scan_parquet doesn't work with BytesIO, so read and convert\n                return pl.read_parquet(data).lazy()\n            return pl.read_parquet(data)\n\n    def dump(self, obj, file):\n        if isinstance(obj, pl.LazyFrame):\n            obj = obj.collect()\n        if not isinstance(obj, pl.DataFrame):\n            raise TypeError(f'requires pl.DataFrame or pl.LazyFrame, but {type(obj)} is passed.')\n        # polars write_parquet requires a file path; default to 'zstd' when compression is None\n        obj.write_parquet(file.name, compression=self._compression or 'zstd')\n\n\nclass FeatherFileProcessorPolars(FileProcessor):\n    \"\"\"Feather file processor for polars DataFrames.\"\"\"\n\n    def __init__(self, store_index_in_feather: bool, lazy: bool = False):\n        if not HAS_POLARS:\n            raise ImportError(\"polars is required for polars-based dataframe types ('polars' or 'polars-lazy'). 
Install with: pip install polars\")\n        super().__init__()\n        self._store_index_in_feather = store_index_in_feather  # Ignored for polars\n        self._lazy = lazy\n\n    def format(self):\n        return luigi.format.Nop\n\n    def load(self, file):\n        # polars uses read_ipc for feather format\n        if ObjectStorage.is_buffered_reader(file):\n            if self._lazy:\n                return pl.scan_ipc(file.name)\n            return pl.read_ipc(file.name)\n        else:\n            data = BytesIO(file.read())\n            if self._lazy:\n                # scan_ipc doesn't work with BytesIO, so read and convert\n                return pl.read_ipc(data).lazy()\n            return pl.read_ipc(data)\n\n    def dump(self, obj, file):\n        if isinstance(obj, pl.LazyFrame):\n            obj = obj.collect()\n        if not isinstance(obj, pl.DataFrame):\n            raise TypeError(f'requires pl.DataFrame or pl.LazyFrame, but {type(obj)} is passed.')\n        # polars uses write_ipc for feather format\n        # Note: store_index_in_feather is ignored for polars as it's pandas-specific\n        obj.write_ipc(file.name)\n"
  },
  {
    "path": "gokart/file_processor.py",
    "content": ""
  },
  {
    "path": "gokart/gcs_config.py",
    "content": "from __future__ import annotations\n\nimport json\nimport os\nfrom typing import cast\n\nimport luigi\nimport luigi.contrib.gcs\nfrom google.oauth2.service_account import Credentials\n\n\nclass GCSConfig(luigi.Config):\n    gcs_credential_name: luigi.StrParameter = luigi.StrParameter(default='GCS_CREDENTIAL', description='GCS credential environment variable.')\n    _client = None\n\n    def get_gcs_client(self) -> luigi.contrib.gcs.GCSClient:\n        if self._client is None:  # use cache as like singleton object\n            self._client = self._get_gcs_client()\n        return self._client\n\n    def _get_gcs_client(self) -> luigi.contrib.gcs.GCSClient:\n        return luigi.contrib.gcs.GCSClient(oauth_credentials=self._load_oauth_credentials())\n\n    def _load_oauth_credentials(self) -> Credentials | None:\n        json_str = os.environ.get(self.gcs_credential_name)\n        if not json_str:\n            return None\n\n        if os.path.isfile(json_str):\n            return cast(Credentials, Credentials.from_service_account_file(json_str))\n\n        return cast(Credentials, Credentials.from_service_account_info(json.loads(json_str)))\n"
  },
  {
    "path": "gokart/gcs_obj_metadata_client.py",
    "content": "from __future__ import annotations\n\nimport copy\nimport functools\nimport json\nimport re\nfrom collections.abc import Iterable\nfrom logging import getLogger\nfrom typing import Any, Final\nfrom urllib.parse import urlsplit\n\nfrom googleapiclient.model import makepatch\n\nfrom gokart.gcs_config import GCSConfig\nfrom gokart.required_task_output import RequiredTaskOutput\nfrom gokart.utils import FlattenableItems\n\nlogger = getLogger(__name__)\n\n\nclass GCSObjectMetadataClient:\n    \"\"\"\n    This class is Utility-Class, so should not be initialized.\n    This class used for adding metadata as labels.\n    \"\"\"\n\n    # Maximum metadata size for GCS objects (8 KiB)\n    MAX_GCS_METADATA_SIZE: Final[int] = 8 * 1024\n\n    @staticmethod\n    def _is_log_related_path(path: str) -> bool:\n        return re.match(r'^gs://.+?/log/(processing_time/|task_info/|task_log/|module_versions/|random_seed/|task_params/).+', path) is not None\n\n    # This is the copied method of luigi.gcs._path_to_bucket_and_key(path).\n    @staticmethod\n    def _path_to_bucket_and_key(path: str) -> tuple[str, str]:\n        (scheme, netloc, path, _, _) = urlsplit(path)\n        assert scheme == 'gs'\n        path_without_initial_slash = path[1:]\n        return netloc, path_without_initial_slash\n\n    @staticmethod\n    def add_task_state_labels(\n        path: str,\n        task_params: dict[str, str] | None = None,\n        custom_labels: dict[str, str] | None = None,\n        required_task_outputs: FlattenableItems[RequiredTaskOutput] | None = None,\n    ) -> None:\n        if GCSObjectMetadataClient._is_log_related_path(path):\n            return\n        # In gokart/object_storage.get_time_stamp, could find same call.\n        # _path_to_bucket_and_key is a private method, so, this might not be acceptable.\n        bucket, obj = GCSObjectMetadataClient._path_to_bucket_and_key(path)\n        _response = 
GCSConfig().get_gcs_client().client.objects().get(bucket=bucket, object=obj).execute()\n        if _response is None:\n            logger.error(f'failed to get object from GCS bucket {bucket} and object {obj}.')\n            return\n        response: dict[str, Any] = dict(_response)\n        original_metadata: dict[Any, Any] = {}\n        if 'metadata' in response.keys():\n            _metadata = response.get('metadata')\n            if _metadata is not None:\n                original_metadata = dict(_metadata)\n        patched_metadata = GCSObjectMetadataClient._get_patched_obj_metadata(\n            copy.deepcopy(original_metadata),\n            task_params,\n            custom_labels,\n            required_task_outputs,\n        )\n        if original_metadata != patched_metadata:\n            # If we use update api, existing object metadata are removed, so should use patch api.\n            # See the official document descriptions.\n            # [Link] https://cloud.google.com/storage/docs/viewing-editing-metadata?hl=ja#rest-set-object-metadata\n            update_response = (\n                GCSConfig()\n                .get_gcs_client()\n                .client.objects()\n                .patch(\n                    bucket=bucket,\n                    object=obj,\n                    body=makepatch({'metadata': original_metadata}, {'metadata': patched_metadata}),\n                )\n                .execute()\n            )\n            if update_response is None:\n                logger.error(f'failed to patch object {obj} in bucket {bucket} and object {obj}.')\n\n    @staticmethod\n    def _normalize_labels(labels: dict[str, Any] | None) -> dict[str, str]:\n        return {str(key): str(value) for key, value in labels.items()} if labels else {}\n\n    @staticmethod\n    def _get_patched_obj_metadata(\n        metadata: Any,\n        task_params: dict[str, str] | None = None,\n        custom_labels: dict[str, str] | None = None,\n        
required_task_outputs: FlattenableItems[RequiredTaskOutput] | None = None,\n    ) -> dict[str, Any] | Any:\n        # If metadata from response when getting bucket and object information is not dictionary,\n        # something wrong might be happened, so return original metadata, no patched.\n        if not isinstance(metadata, dict):\n            logger.warning(f'metadata is not a dict: {metadata}, something wrong was happened when getting response when get bucket and object information.')\n            return metadata\n        # Maximum size of metadata for each object is 8 KiB.\n        # [Link]: https://cloud.google.com/storage/quotas#objects\n        normalized_task_params_labels = GCSObjectMetadataClient._normalize_labels(task_params)\n        normalized_custom_labels = GCSObjectMetadataClient._normalize_labels(custom_labels)\n        # There is a possibility that the keys of user-provided labels(custom_labels) may conflict with those generated from task parameters (task_params_labels).\n        # However, users who utilize custom_labels are no longer expected to search using the labels generated from task parameters.\n        # Instead, users are expected to search using the labels they provided.\n        # Therefore, in the event of a key conflict, the value registered by the user-provided labels will take precedence.\n        normalized_labels = [normalized_custom_labels, normalized_task_params_labels]\n        if required_task_outputs:\n            normalized_labels.append({'__required_task_outputs': json.dumps(GCSObjectMetadataClient._get_serialized_string(required_task_outputs))})\n\n        _merged_labels = GCSObjectMetadataClient._merge_custom_labels_and_task_params_labels(normalized_labels)\n        return GCSObjectMetadataClient._adjust_gcs_metadata_limit_size(dict(metadata) | _merged_labels)\n\n    @staticmethod\n    def _get_serialized_string(required_task_outputs: FlattenableItems[RequiredTaskOutput]) -> FlattenableItems[str]:\n        if 
isinstance(required_task_outputs, RequiredTaskOutput):\n            return required_task_outputs.serialize()\n        elif isinstance(required_task_outputs, dict):\n            return {k: GCSObjectMetadataClient._get_serialized_string(v) for k, v in required_task_outputs.items()}\n        elif isinstance(required_task_outputs, Iterable):\n            return [GCSObjectMetadataClient._get_serialized_string(ro) for ro in required_task_outputs]\n        else:\n            raise TypeError(\n                f'Unsupported type for required_task_outputs: {type(required_task_outputs)}. '\n                'It should be RequiredTaskOutput, dict, or iterable of RequiredTaskOutput.'\n            )\n\n    @staticmethod\n    def _merge_custom_labels_and_task_params_labels(\n        normalized_labels_list: list[dict[str, str]],\n    ) -> dict[str, str]:\n        def __merge_two_dicts_helper(merged: dict[str, str], current_labels: dict[str, str]) -> dict[str, str]:\n            next_merged = copy.deepcopy(merged)\n            for label_name, label_value in current_labels.items():\n                if len(label_value) == 0:\n                    logger.warning(f'value of label_name={label_name} is empty. So skip to add as a metadata.')\n                    continue\n                if label_name in next_merged:\n                    logger.warning(f'label_name={label_name} is already seen. 
So skip to add as metadata.')\n                    continue\n                next_merged[label_name] = label_value\n            return next_merged\n\n        return functools.reduce(__merge_two_dicts_helper, normalized_labels_list, {})\n\n    # Google Cloud Storage(GCS) has a limitation of metadata size, 8 KiB.\n    # So, we need to adjust the size of metadata.\n    @staticmethod\n    def _adjust_gcs_metadata_limit_size(_labels: dict[str, str]) -> dict[str, str]:\n        def _get_label_size(label_name: str, label_value: str) -> int:\n            return len(label_name.encode('utf-8')) + len(label_value.encode('utf-8'))\n\n        labels = copy.deepcopy(_labels)\n        max_gcs_metadata_size, current_total_metadata_size = (\n            GCSObjectMetadataClient.MAX_GCS_METADATA_SIZE,\n            sum(_get_label_size(label_name, label_value) for label_name, label_value in labels.items()),\n        )\n        if current_total_metadata_size <= max_gcs_metadata_size:\n            return labels\n        # NOTE: remove labels to stay within max metadata size.\n        to_remove = []\n        for label_name, label_value in reversed(tuple(labels.items())):\n            size = _get_label_size(label_name, label_value)\n            to_remove.append(label_name)\n            current_total_metadata_size -= size\n            if current_total_metadata_size <= max_gcs_metadata_size:\n                break\n\n        for key in to_remove:\n            del labels[key]\n        return labels\n"
  },
  {
    "path": "gokart/gcs_zip_client.py",
    "content": "from __future__ import annotations\n\nimport os\nimport shutil\nfrom typing import cast\n\nfrom gokart.gcs_config import GCSConfig\nfrom gokart.zip_client import ZipClient, _unzip_file\n\n\nclass GCSZipClient(ZipClient):\n    def __init__(self, file_path: str, temporary_directory: str) -> None:\n        self._file_path = file_path\n        self._temporary_directory = temporary_directory\n        self._client = GCSConfig().get_gcs_client()\n\n    def exists(self) -> bool:\n        return cast(bool, self._client.exists(self._file_path))\n\n    def make_archive(self) -> None:\n        extension = os.path.splitext(self._file_path)[1]\n        shutil.make_archive(base_name=self._temporary_directory, format=extension[1:], root_dir=self._temporary_directory)\n        self._client.put(self._temporary_file_path(), self._file_path)\n\n    def unpack_archive(self) -> None:\n        os.makedirs(self._temporary_directory, exist_ok=True)\n        file_pointer = self._client.download(self._file_path)\n        _unzip_file(fp=file_pointer, extract_dir=self._temporary_directory)\n\n    def remove(self) -> None:\n        self._client.remove(self._file_path)\n\n    @property\n    def path(self) -> str:\n        return self._file_path\n\n    def _temporary_file_path(self):\n        extension = os.path.splitext(self._file_path)[1]\n        base_name = self._temporary_directory\n        if base_name.endswith('/'):\n            base_name = base_name[:-1]\n        return base_name + extension\n"
  },
  {
    "path": "gokart/in_memory/__init__.py",
    "content": "__all__ = [\n    'InMemoryCacheRepository',\n    'InMemoryTarget',\n    'make_in_memory_target',\n]\n\nfrom .repository import InMemoryCacheRepository\nfrom .target import InMemoryTarget, make_in_memory_target\n"
  },
  {
    "path": "gokart/in_memory/data.py",
    "content": "from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom typing import Any\n\n\n@dataclass\nclass InMemoryData:\n    value: Any\n    last_modification_time: datetime\n\n    @classmethod\n    def create_data(self, value: Any) -> InMemoryData:\n        return InMemoryData(value=value, last_modification_time=datetime.now())\n"
  },
  {
    "path": "gokart/in_memory/repository.py",
    "content": "from __future__ import annotations\n\nfrom collections.abc import Iterator\nfrom datetime import datetime\nfrom typing import Any\n\nfrom .data import InMemoryData\n\n\nclass InMemoryCacheRepository:\n    _cache: dict[str, InMemoryData] = {}\n\n    def __init__(self):\n        pass\n\n    def get_value(self, key: str) -> Any:\n        return self._get_data(key).value\n\n    def get_last_modification_time(self, key: str) -> datetime:\n        return self._get_data(key).last_modification_time\n\n    def _get_data(self, key: str) -> InMemoryData:\n        return self._cache[key]\n\n    def set_value(self, key: str, obj: Any) -> None:\n        data = InMemoryData.create_data(obj)\n        self._cache[key] = data\n\n    def has(self, key: str) -> bool:\n        return key in self._cache\n\n    def remove(self, key: str) -> None:\n        assert self.has(key), f'{key} does not exist.'\n        del self._cache[key]\n\n    def empty(self) -> bool:\n        return not self._cache\n\n    def clear(self) -> None:\n        self._cache.clear()\n\n    def get_gen(self) -> Iterator[tuple[str, Any]]:\n        for key, data in self._cache.items():\n            yield key, data.value\n\n    @property\n    def size(self) -> int:\n        return len(self._cache)\n"
  },
  {
    "path": "gokart/in_memory/target.py",
    "content": "from __future__ import annotations\n\nfrom datetime import datetime\nfrom typing import Any\n\nfrom gokart.in_memory.repository import InMemoryCacheRepository\nfrom gokart.required_task_output import RequiredTaskOutput\nfrom gokart.target import TargetOnKart, TaskLockParams\nfrom gokart.utils import FlattenableItems\n\n_repository = InMemoryCacheRepository()\n\n\nclass InMemoryTarget(TargetOnKart):\n    def __init__(self, data_key: str, task_lock_param: TaskLockParams):\n        if task_lock_param.should_task_lock:\n            raise ValueError('Redis with `InMemoryTarget` is not currently supported.')\n\n        self._data_key = data_key\n        self._task_lock_params = task_lock_param\n\n    def _exists(self) -> bool:\n        return _repository.has(self._data_key)\n\n    def _get_task_lock_params(self) -> TaskLockParams:\n        return self._task_lock_params\n\n    def _load(self) -> Any:\n        return _repository.get_value(self._data_key)\n\n    def _dump(\n        self,\n        obj: Any,\n        task_params: dict[str, str] | None = None,\n        custom_labels: dict[str, str] | None = None,\n        required_task_outputs: FlattenableItems[RequiredTaskOutput] | None = None,\n    ) -> None:\n        return _repository.set_value(self._data_key, obj)\n\n    def _remove(self) -> None:\n        _repository.remove(self._data_key)\n\n    def _last_modification_time(self) -> datetime:\n        if not _repository.has(self._data_key):\n            raise ValueError(f'No object(s) which id is {self._data_key} are stored before.')\n        time = _repository.get_last_modification_time(self._data_key)\n        return time\n\n    def _path(self) -> str:\n        # TODO: this module name `_path` migit not be appropriate\n        return self._data_key\n\n\ndef make_in_memory_target(target_key: str, task_lock_params: TaskLockParams) -> InMemoryTarget:\n    return InMemoryTarget(target_key, task_lock_params)\n"
  },
  {
    "path": "gokart/info.py",
    "content": "from __future__ import annotations\n\nfrom logging import getLogger\nfrom typing import Any\n\nimport luigi\n\nfrom gokart.task import TaskOnKart\nfrom gokart.tree.task_info import make_task_info_as_tree_str\n\nlogger = getLogger(__name__)\n\n\ndef make_tree_info(\n    task: TaskOnKart[Any],\n    indent: str = '',\n    last: bool = True,\n    details: bool = False,\n    abbr: bool = True,\n    visited_tasks: set[str] | None = None,\n    ignore_task_names: list[str] | None = None,\n) -> str:\n    \"\"\"\n    Return a string representation of the tasks, their statuses/parameters in a dependency tree format\n\n    This function has moved to `gokart.tree.task_info.make_task_info_as_tree_str`.\n    This code is remained for backward compatibility.\n\n    Parameters\n    ----------\n    - task: TaskOnKart\n        Root task.\n    - details: bool\n        Whether or not to output details.\n    - abbr: bool\n        Whether or not to simplify tasks information that has already appeared.\n    - ignore_task_names: list[str] | None\n        List of task names to ignore.\n    Returns\n    -------\n    - tree_info : str\n        Formatted task dependency tree.\n    \"\"\"\n    return make_task_info_as_tree_str(task=task, details=details, abbr=abbr, ignore_task_names=ignore_task_names)\n\n\nclass tree_info(TaskOnKart[Any]):\n    mode: luigi.StrParameter = luigi.StrParameter(default='', description='This must be in [\"simple\", \"all\"].')\n    output_path: luigi.StrParameter = luigi.StrParameter(default='tree.txt', description='Output file path.')\n\n    def output(self):\n        return self.make_target(self.output_path, use_unique_id=False)\n"
  },
  {
    "path": "gokart/mypy.py",
    "content": "\"\"\"Plugin that provides support for gokart.TaskOnKart.\n\nThis Code reuses the code from mypy.plugins.dataclasses\nhttps://github.com/python/mypy/blob/0753e2a82dad35034e000609b6e8daa37238bfaa/mypy/plugins/dataclasses.py\n\"\"\"\n\nfrom __future__ import annotations\n\nimport re\nimport sys\nimport warnings\nfrom collections.abc import Callable, Iterator\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Any, Final, Literal\n\nimport luigi\nfrom mypy.expandtype import expand_type\nfrom mypy.nodes import (\n    ARG_NAMED,\n    ARG_NAMED_OPT,\n    ArgKind,\n    Argument,\n    AssignmentStmt,\n    Block,\n    CallExpr,\n    ClassDef,\n    EllipsisExpr,\n    Expression,\n    IfStmt,\n    JsonDict,\n    MemberExpr,\n    NameExpr,\n    PlaceholderNode,\n    RefExpr,\n    Statement,\n    TempNode,\n    TypeInfo,\n    Var,\n)\nfrom mypy.options import Options\nfrom mypy.plugin import ClassDefContext, FunctionContext, Plugin, SemanticAnalyzerPluginInterface\nfrom mypy.plugins.common import (\n    add_method_to_class,\n    deserialize_and_fixup_type,\n)\nfrom mypy.server.trigger import make_wildcard_trigger\nfrom mypy.state import state\nfrom mypy.typeops import map_type_from_supertype\nfrom mypy.types import (\n    AnyType,\n    Instance,\n    NoneType,\n    Type,\n    TypeOfAny,\n    UnionType,\n)\nfrom mypy.typevars import fill_typevars\n\nMETADATA_TAG: Final[str] = 'task_on_kart'\n\nPARAMETER_FULLNAME_MATCHER: Final = re.compile(r'^(gokart|luigi)(\\.parameter)?\\.\\w*Parameter$')\nPARAMETER_TMP_MATCHER: Final = re.compile(r'^\\w*Parameter$')\n\n\nclass PluginOptions(Enum):\n    DISALLOW_MISSING_PARAMETERS = 'disallow_missing_parameters'\n\n\n@dataclass\nclass TaskOnKartPluginOptions:\n    # Whether to error on missing parameters in the constructor.\n    # Some projects use luigi.Config to set parameters, which does not require parameters to be explicitly passed to the constructor.\n    disallow_missing_parameters: bool = 
False\n\n    @classmethod\n    def _parse_toml(cls, config_file: str) -> dict[str, Any]:\n        if sys.version_info >= (3, 11):\n            import tomllib as toml_\n        else:\n            try:\n                import tomli as toml_\n            except ImportError:  # pragma: no cover\n                warnings.warn('install tomli to parse pyproject.toml under Python 3.10', stacklevel=1)\n                return {}\n\n        with open(config_file, 'rb') as f:\n            return toml_.load(f)\n\n    @classmethod\n    def parse_config_file(cls, config_file: str) -> TaskOnKartPluginOptions:\n        # TODO: support other configuration file formats if necessary.\n        if not config_file.endswith('.toml'):\n            warnings.warn('gokart mypy plugin can be configured by pyproject.toml', stacklevel=1)\n            return cls()\n\n        config = cls._parse_toml(config_file)\n        gokart_plugin_config = config.get('tool', {}).get('gokart-mypy', {})\n\n        disallow_missing_parameters = gokart_plugin_config.get(PluginOptions.DISALLOW_MISSING_PARAMETERS.value, False)\n        if not isinstance(disallow_missing_parameters, bool):\n            raise ValueError(f'{PluginOptions.DISALLOW_MISSING_PARAMETERS.value} must be a boolean value')\n        return cls(disallow_missing_parameters=disallow_missing_parameters)\n\n\nclass TaskOnKartPlugin(Plugin):\n    def __init__(self, options: Options) -> None:\n        super().__init__(options)\n        if options.config_file is not None:\n            self._options = TaskOnKartPluginOptions.parse_config_file(options.config_file)\n        else:\n            self._options = TaskOnKartPluginOptions()\n\n    def get_base_class_hook(self, fullname: str) -> Callable[[ClassDefContext], None] | None:\n        # The following gathers attributes from gokart.TaskOnKart such as `workspace_directory`\n        # the transformation does not affect because the class has `__init__` method of `gokart.TaskOnKart`.\n        #\n        # 
NOTE: `gokart.task.luigi.Task` condition is required for the release of luigi versions without py.typed\n        if fullname in {'gokart.task.luigi.Task', 'luigi.task.Task'}:\n            return self._task_on_kart_class_maker_callback\n\n        sym = self.lookup_fully_qualified(fullname)\n        if sym and isinstance(sym.node, TypeInfo):\n            if any(base.fullname == 'gokart.task.TaskOnKart' for base in sym.node.mro):\n                return self._task_on_kart_class_maker_callback\n        return None\n\n    def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] | None:\n        \"\"\"Adjust the return type of the `Parameters` function.\"\"\"\n        if PARAMETER_FULLNAME_MATCHER.match(fullname):\n            return self._task_on_kart_parameter_field_callback\n        return None\n\n    def _task_on_kart_class_maker_callback(self, ctx: ClassDefContext) -> None:\n        transformer = TaskOnKartTransformer(ctx.cls, ctx.reason, ctx.api, self._options)\n        transformer.transform()\n\n    def _task_on_kart_parameter_field_callback(self, ctx: FunctionContext) -> Type:\n        \"\"\"Extract the type of the `default` argument from the Field function, and use it as the return type.\n\n        In particular:\n        * Retrieve the type of the argument which is specified, and use it as return type for the function.\n        * If no default argument is specified, return AnyType with unannotated type instead of parameter types like `luigi.Parameter()`\n          This makes mypy avoid conflict between the type annotation and the parameter type.\n          e.g.\n          ```python\n          foo: int = luigi.IntParameter()\n          ```\n        \"\"\"\n        try:\n            default_idx = ctx.callee_arg_names.index('default')\n        # if no `default` argument is found, return AnyType with unannotated type.\n        except ValueError:\n            return AnyType(TypeOfAny.unannotated)\n\n        default_args = 
ctx.args[default_idx]\n\n        if default_args:\n            default_type = ctx.arg_types[0][0]\n            default_arg = default_args[0]\n\n            # Fallback to default Any type if the field is required\n            if not isinstance(default_arg, EllipsisExpr):\n                return default_type\n        # NOTE: This is a workaround to avoid the error between type annotation and parameter type.\n        #       As the following code snippet, the type of `foo` is `int` but the assigned value is `luigi.IntParameter()`.\n        #       foo: int = luigi.IntParameter()\n        # TODO: infer mypy type from the parameter type.\n        return AnyType(TypeOfAny.unannotated)\n\n\nclass TaskOnKartAttribute:\n    def __init__(\n        self,\n        name: str,\n        has_default: bool,\n        line: int,\n        column: int,\n        type: Type | None,\n        info: TypeInfo,\n        api: SemanticAnalyzerPluginInterface,\n        options: TaskOnKartPluginOptions,\n    ) -> None:\n        self.name = name\n        self.has_default = has_default\n        self.line = line\n        self.column = column\n        self.type = type  # Type as __init__ argument\n        self.info = info\n        self._api = api\n        self._options = options\n\n    def to_argument(self, current_info: TypeInfo, *, of: Literal['__init__',]) -> Argument:\n        if of == '__init__':\n            arg_kind = self._get_arg_kind_by_options()\n\n        return Argument(\n            variable=self.to_var(current_info),\n            type_annotation=self.expand_type(current_info),\n            initializer=EllipsisExpr() if self.has_default else None,  # Only used by stubgen\n            kind=arg_kind,\n        )\n\n    def expand_type(self, current_info: TypeInfo) -> Type | None:\n        if self.type is not None and self.info.self_type is not None:\n            # In general, it is not safe to call `expand_type()` during semantic analysis,\n            # however this plugin is called very 
late, so all types should be fully ready.\n            # Also, it is tricky to avoid eager expansion of Self types here (e.g. because\n            # we serialize attributes).\n            with state.strict_optional_set(self._api.options.strict_optional):\n                return expand_type(self.type, {self.info.self_type.id: fill_typevars(current_info)})\n        return self.type\n\n    def to_var(self, current_info: TypeInfo) -> Var:\n        return Var(self.name, self.expand_type(current_info))\n\n    def serialize(self) -> JsonDict:\n        assert self.type\n        return {\n            'name': self.name,\n            'has_default': self.has_default,\n            'line': self.line,\n            'column': self.column,\n            'type': self.type.serialize(),\n        }\n\n    @classmethod\n    def deserialize(cls, info: TypeInfo, data: JsonDict, api: SemanticAnalyzerPluginInterface, options: TaskOnKartPluginOptions) -> TaskOnKartAttribute:\n        data = data.copy()\n        typ = deserialize_and_fixup_type(data.pop('type'), api)\n        return cls(type=typ, info=info, **data, api=api, options=options)\n\n    def expand_typevar_from_subtype(self, sub_type: TypeInfo) -> None:\n        \"\"\"Expands type vars in the context of a subtype when an attribute is inherited\n        from a generic super type.\"\"\"\n        if self.type is not None:\n            with state.strict_optional_set(self._api.options.strict_optional):\n                self.type = map_type_from_supertype(self.type, sub_type, self.info)\n\n    def _get_arg_kind_by_options(self) -> Literal[ArgKind.ARG_NAMED, ArgKind.ARG_NAMED_OPT]:\n        \"\"\"Set the argument kind based on the options.\n\n        if `disallow_missing_parameters` is True, the argument kind is `ARG_NAMED` when the attribute has no default value.\n        This means the that all the parameters are passed to the constructor as keyword-only arguments.\n\n        Returns:\n            Literal[ArgKind.ARG_NAMED, 
ArgKind.ARG_NAMED_OPT]: The argument kind.\n        \"\"\"\n        if not self._options.disallow_missing_parameters:\n            return ARG_NAMED_OPT\n        if self.has_default:\n            return ARG_NAMED_OPT\n        # required parameter\n        return ARG_NAMED\n\n\nclass TaskOnKartTransformer:\n    \"\"\"Implement the behavior of gokart.TaskOnKart.\"\"\"\n\n    def __init__(\n        self,\n        cls: ClassDef,\n        reason: Expression | Statement,\n        api: SemanticAnalyzerPluginInterface,\n        options: TaskOnKartPluginOptions,\n    ) -> None:\n        self._cls = cls\n        self._reason = reason\n        self._api = api\n        self._options = options\n\n    def transform(self) -> bool:\n        \"\"\"Apply all the necessary transformations to the underlying gokart.TaskOnKart\"\"\"\n        info = self._cls.info\n        attributes = self.collect_attributes()\n\n        if attributes is None:\n            # Some definitions are not ready. We need another pass.\n            return False\n        for attr in attributes:\n            if attr.type is None:\n                return False\n        # If there are no attributes, it may be that the semantic analyzer has not\n        # processed them yet. 
In order to work around this, we can simply skip generating\n        # __init__ if there are no attributes, because if the user truly did not define any,\n        # then the object default __init__ with an empty signature will be present anyway.\n        if ('__init__' not in info.names or info.names['__init__'].plugin_generated) and attributes:\n            args = [attr.to_argument(info, of='__init__') for attr in attributes]\n            add_method_to_class(self._api, self._cls, '__init__', args=args, return_type=NoneType())\n        info.metadata[METADATA_TAG] = {\n            'attributes': [attr.serialize() for attr in attributes],\n        }\n\n        return True\n\n    def _get_assignment_statements_from_if_statement(self, stmt: IfStmt) -> Iterator[AssignmentStmt]:\n        for body in stmt.body:\n            if not body.is_unreachable:\n                yield from self._get_assignment_statements_from_block(body)\n        if stmt.else_body is not None and not stmt.else_body.is_unreachable:\n            yield from self._get_assignment_statements_from_block(stmt.else_body)\n\n    def _get_assignment_statements_from_block(self, block: Block) -> Iterator[AssignmentStmt]:\n        for stmt in block.body:\n            if isinstance(stmt, AssignmentStmt):\n                yield stmt\n            elif isinstance(stmt, IfStmt):\n                yield from self._get_assignment_statements_from_if_statement(stmt)\n\n    def collect_attributes(self) -> list[TaskOnKartAttribute] | None:\n        \"\"\"Collect all attributes declared in the task and its parents.\n\n        All assignments of the form\n\n          a: SomeType\n          b: SomeOtherType = ...\n\n        are collected.\n\n        Return None if some base class hasn't been processed\n        yet and thus we'll need to ask for another pass.\n        \"\"\"\n        cls = self._cls\n\n        # First, collect attributes belonging to any class in the MRO, ignoring duplicates.\n        #\n        # We iterate 
through the MRO in reverse because attrs defined in the parent must appear\n        # earlier in the attributes list than attrs defined in the child.\n        #\n        # However, we also want attributes defined in the subtype to override ones defined\n        # in the parent. We can implement this via a dict without disrupting the attr order\n        # because dicts preserve insertion order in Python 3.7+.\n        found_attrs: dict[str, TaskOnKartAttribute] = {}\n        for info in reversed(cls.info.mro[1:-1]):\n            if METADATA_TAG not in info.metadata:\n                continue\n            # Each class depends on the set of attributes in its task_on_kart ancestors.\n            self._api.add_plugin_dependency(make_wildcard_trigger(info.fullname))\n\n            for data in info.metadata[METADATA_TAG]['attributes']:\n                name: str = data['name']\n\n                attr = TaskOnKartAttribute.deserialize(info, data, self._api, self._options)\n                # TODO: We shouldn't be performing type operations during the main\n                #       semantic analysis pass, since some TypeInfo attributes might\n                #       still be in flux. 
This should be performed in a later phase.\n                attr.expand_typevar_from_subtype(cls.info)\n                found_attrs[name] = attr\n\n                sym_node = cls.info.names.get(name)\n                if sym_node and sym_node.node and not isinstance(sym_node.node, Var):\n                    self._api.fail(\n                        'TaskOnKart attribute may only be overridden by another attribute',\n                        sym_node.node,\n                    )\n\n        # Second, collect attributes belonging to the current class.\n        current_attr_names: set[str] = set()\n        for stmt in self._get_assignment_statements_from_block(cls.defs):\n            if not is_parameter_call(stmt.rvalue):\n                continue\n\n            # a: int, b: str = 1, 'foo' is not supported syntax so we\n            # don't have to worry about it.\n            lhs = stmt.lvalues[0]\n            if not isinstance(lhs, NameExpr):\n                continue\n            sym = cls.info.names.get(lhs.name)\n            if sym is None:\n                # There was probably a semantic analysis error.\n                continue\n\n            node = sym.node\n            assert not isinstance(node, PlaceholderNode)\n\n            assert isinstance(node, Var)\n\n            has_parameter_call, parameter_args = self._collect_parameter_args(stmt.rvalue)\n            has_default = False\n            # Ensure that something like x: int = field() is rejected\n            # after an attribute with a default.\n            if has_parameter_call:\n                has_default = 'default' in parameter_args\n\n            # All other assignments are already type checked.\n            elif not isinstance(stmt.rvalue, TempNode):\n                has_default = True\n\n            if not has_default:\n                # Make all non-default task_on_kart attributes implicit because they are de-facto\n                # set on self in the generated __init__(), not in the class body. 
On the other\n                # hand, we don't know how custom task_on_kart transforms initialize attributes,\n                # so we don't treat them as implicit. This is required to support descriptors\n                # (https://github.com/python/mypy/issues/14868).\n                sym.implicit = True\n\n            current_attr_names.add(lhs.name)\n            with state.strict_optional_set(self._api.options.strict_optional):\n                init_type = sym.type\n\n            # infer Parameter type\n            if init_type is None:\n                init_type = self._infer_type_from_parameters(stmt.rvalue)\n\n            found_attrs[lhs.name] = TaskOnKartAttribute(\n                name=lhs.name,\n                has_default=has_default,\n                line=stmt.line,\n                column=stmt.column,\n                type=init_type,\n                info=cls.info,\n                api=self._api,\n                options=self._options,\n            )\n\n        return list(found_attrs.values())\n\n    def _collect_parameter_args(self, expr: Expression) -> tuple[bool, dict[str, Expression]]:\n        \"\"\"Returns a tuple where the first value represents whether or not\n        the expression is a call to luigi.Parameter() or gokart.TaskInstanceParameter()\n        and the second value is a dictionary of the keyword arguments that luigi.Parameter() or gokart.TaskInstanceParameter() was called with.\n        \"\"\"\n        if isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr):\n            args = {}\n            for name, arg in zip(expr.arg_names, expr.args, strict=False):\n                if name is None:\n                    # NOTE: this is a workaround to get default value from a parameter\n                    self._api.fail(\n                        'Positional arguments are not allowed for parameters when using the mypy plugin. 
'\n                        \"Update your code to use named arguments, like luigi.Parameter(default='foo') instead of luigi.Parameter('foo')\",\n                        expr,\n                    )\n                    continue\n                args[name] = arg\n            return True, args\n        return False, {}\n\n    def _infer_type_from_parameters(self, parameter: Expression) -> Type | None:\n        \"\"\"\n        Generate default type from Parameter.\n        For example, when parameter is `luigi.parameter.Parameter`, this method should return `str` type.\n        \"\"\"\n        parameter_name = _extract_parameter_name(parameter)\n        if parameter_name is None:\n            return None\n\n        underlying_type: Type | None = None\n        if parameter_name in ['luigi.parameter.Parameter', 'luigi.parameter.OptionalParameter']:\n            underlying_type = self._api.named_type('builtins.str', [])\n        elif parameter_name in ['luigi.parameter.IntParameter', 'luigi.parameter.OptionalIntParameter']:\n            underlying_type = self._api.named_type('builtins.int', [])\n        elif parameter_name in ['luigi.parameter.FloatParameter', 'luigi.parameter.OptionalFloatParameter']:\n            underlying_type = self._api.named_type('builtins.float', [])\n        elif parameter_name in ['luigi.parameter.BoolParameter', 'luigi.parameter.OptionalBoolParameter']:\n            underlying_type = self._api.named_type('builtins.bool', [])\n        elif parameter_name in ['luigi.parameter.DateParameter', 'luigi.parameter.MonthParameter', 'luigi.parameter.YearParameter']:\n            underlying_type = self._api.named_type('datetime.date', [])\n        elif parameter_name in ['luigi.parameter.DateHourParameter', 'luigi.parameter.DateMinuteParameter', 'luigi.parameter.DateSecondParameter']:\n            underlying_type = self._api.named_type('datetime.datetime', [])\n        elif parameter_name in ['luigi.parameter.TimeDeltaParameter']:\n            
underlying_type = self._api.named_type('datetime.timedelta', [])\n        elif parameter_name in ['luigi.parameter.DictParameter', 'luigi.parameter.OptionalDictParameter']:\n            underlying_type = self._api.named_type('builtins.dict', [AnyType(TypeOfAny.unannotated), AnyType(TypeOfAny.unannotated)])\n        elif parameter_name in ['luigi.parameter.ListParameter', 'luigi.parameter.OptionalListParameter']:\n            underlying_type = self._api.named_type('builtins.tuple', [AnyType(TypeOfAny.unannotated)])\n        elif parameter_name in ['luigi.parameter.TupleParameter', 'luigi.parameter.OptionalTupleParameter']:\n            underlying_type = self._api.named_type('builtins.tuple', [AnyType(TypeOfAny.unannotated)])\n        elif parameter_name in ['luigi.parameter.PathParameter', 'luigi.parameter.OptionalPathParameter']:\n            underlying_type = self._api.named_type('pathlib.Path', [])\n        elif parameter_name in ['gokart.parameter.TaskInstanceParameter']:\n            underlying_type = self._api.named_type('gokart.task.TaskOnKart', [AnyType(TypeOfAny.unannotated)])\n        elif parameter_name in ['gokart.parameter.ListTaskInstanceParameter']:\n            underlying_type = self._api.named_type('builtins.list', [self._api.named_type('gokart.task.TaskOnKart', [AnyType(TypeOfAny.unannotated)])])\n        elif parameter_name in ['gokart.parameter.ExplicitBoolParameter']:\n            underlying_type = self._api.named_type('builtins.bool', [])\n        elif parameter_name in ['luigi.parameter.NumericalParameter']:\n            underlying_type = self._get_type_from_args(parameter, 'var_type')\n        elif parameter_name in ['luigi.parameter.ChoiceParameter']:\n            underlying_type = self._get_type_from_args(parameter, 'var_type')\n        elif parameter_name in ['luigi.parameter.ChoiceListParameter']:\n            base_type = self._get_type_from_args(parameter, 'var_type')\n            if base_type is not None:\n                
underlying_type = self._api.named_type('builtins.tuple', [base_type])\n        elif parameter_name in ['luigi.parameter.EnumParameter']:\n            underlying_type = self._get_type_from_args(parameter, 'enum')\n        elif parameter_name in ['luigi.parameter.EnumListParameter']:\n            base_type = self._get_type_from_args(parameter, 'enum')\n            if base_type is not None:\n                underlying_type = self._api.named_type('builtins.tuple', [base_type])\n\n        if underlying_type is None:\n            return None\n\n        # When parameter has Optional, it can be a None value.\n        if 'Optional' in parameter_name:\n            return UnionType([underlying_type, NoneType()])\n\n        return underlying_type\n\n    def _get_type_from_args(self, parameter: Expression, arg_key: str) -> Type | None:\n        \"\"\"\n        Get type from parameter arguments.\n\n        e.g.)\n        When parameter is `luigi.ChoiceParameter(var_type=int)`, this method should return `int` type.\n        \"\"\"\n        ok, args = self._collect_parameter_args(parameter)\n        if not ok:\n            return None\n\n        if arg_key not in args:\n            return None\n\n        arg = args[arg_key]\n        if not isinstance(arg, NameExpr):\n            return None\n        if not isinstance(arg.node, TypeInfo):\n            return None\n        return Instance(arg.node, [])\n\n\ndef is_parameter_call(expr: Expression) -> bool:\n    \"\"\"Checks if the expression is a call to luigi.Parameter()\"\"\"\n    parameter_name = _extract_parameter_name(expr)\n    if parameter_name is None:\n        return False\n    return PARAMETER_FULLNAME_MATCHER.match(parameter_name) is not None\n\n\ndef _extract_parameter_name(expr: Expression) -> str | None:\n    \"\"\"Extract name if the expression is a call to luigi.Parameter()\"\"\"\n    if not isinstance(expr, CallExpr):\n        return None\n\n    callee = expr.callee\n    if isinstance(callee, MemberExpr):\n        
type_info = callee.node\n        if type_info is None and isinstance(callee.expr, NameExpr):\n            return f'{callee.expr.name}.{callee.name}'\n    elif isinstance(callee, NameExpr):\n        type_info = callee.node\n    else:\n        return None\n\n    if isinstance(type_info, TypeInfo):\n        return type_info.fullname\n\n    # Currently, luigi doesn't provide py.typed. It will be included in the release following 3.5.1.\n    # https://github.com/spotify/luigi/pull/3297\n    # With the following code, we can't resolve the full parameter name correctly.\n    #\n    # from luigi import Parameter\n    # class MyTask(gokart.TaskOnKart):\n    #     param = Parameter()\n    if isinstance(type_info, Var) and luigi.__version__ <= '3.5.1':\n        return type_info.name\n\n    return None\n\n\ndef plugin(version: str) -> type[Plugin]:\n    return TaskOnKartPlugin\n"
  },
  {
    "path": "gokart/object_storage.py",
    "content": "from __future__ import annotations\n\nfrom datetime import datetime\nfrom typing import cast\n\nimport luigi\nimport luigi.contrib.gcs\nimport luigi.contrib.s3\nfrom luigi.format import Format\n\nfrom gokart.gcs_config import GCSConfig\nfrom gokart.gcs_zip_client import GCSZipClient\nfrom gokart.s3_config import S3Config\nfrom gokart.s3_zip_client import S3ZipClient\nfrom gokart.zip_client import ZipClient\n\nobject_storage_path_prefix = ['s3://', 'gs://']\n\n\nclass ObjectStorage:\n    @staticmethod\n    def if_object_storage_path(path: str) -> bool:\n        for prefix in object_storage_path_prefix:\n            if path.startswith(prefix):\n                return True\n        return False\n\n    @staticmethod\n    def get_object_storage_target(path: str, format: Format) -> luigi.target.FileSystemTarget:\n        if path.startswith('s3://'):\n            return luigi.contrib.s3.S3Target(path, client=S3Config().get_s3_client(), format=format)\n        elif path.startswith('gs://'):\n            return luigi.contrib.gcs.GCSTarget(path, client=GCSConfig().get_gcs_client(), format=format)\n        else:\n            raise\n\n    @staticmethod\n    def exists(path: str) -> bool:\n        if path.startswith('s3://'):\n            return cast(bool, S3Config().get_s3_client().exists(path))\n        elif path.startswith('gs://'):\n            return cast(bool, GCSConfig().get_gcs_client().exists(path))\n        else:\n            raise\n\n    @staticmethod\n    def get_timestamp(path: str) -> datetime:\n        if path.startswith('s3://'):\n            return cast(datetime, S3Config().get_s3_client().get_key(path).last_modified)\n        elif path.startswith('gs://'):\n            # for gcs object\n            # should PR to luigi\n            bucket, obj = GCSConfig().get_gcs_client()._path_to_bucket_and_key(path)\n            result = GCSConfig().get_gcs_client().client.objects().get(bucket=bucket, object=obj).execute()\n            return cast(datetime, 
result['updated'])\n        else:\n            raise\n\n    @staticmethod\n    def get_zip_client(file_path: str, temporary_directory: str) -> ZipClient:\n        if file_path.startswith('s3://'):\n            return S3ZipClient(file_path=file_path, temporary_directory=temporary_directory)\n        elif file_path.startswith('gs://'):\n            return GCSZipClient(file_path=file_path, temporary_directory=temporary_directory)\n        else:\n            raise\n\n    @staticmethod\n    def is_buffered_reader(file: object) -> bool:\n        return not isinstance(file, luigi.contrib.s3.ReadableS3File)\n"
  },
  {
    "path": "gokart/pandas_type_config.py",
    "content": "from __future__ import annotations\n\nfrom abc import abstractmethod\nfrom logging import getLogger\nfrom typing import Any\n\nimport luigi\nimport numpy as np\nimport pandas as pd\nfrom luigi.task_register import Register\n\nlogger = getLogger(__name__)\n\n\nclass PandasTypeError(Exception):\n    \"\"\"Raised when the type of the pandas DataFrame column is not as expected.\"\"\"\n\n\nclass PandasTypeConfig(luigi.Config):\n    @classmethod\n    @abstractmethod\n    def type_dict(cls) -> dict[str, Any]:\n        pass\n\n    @classmethod\n    def check(cls, df: pd.DataFrame) -> None:\n        for column_name, column_type in cls.type_dict().items():\n            cls._check_column(df, column_name, column_type)\n\n    @classmethod\n    def _check_column(cls, df: pd.DataFrame, column_name: str, column_type: type) -> None:\n        if column_name not in df.columns:\n            return\n\n        if not np.all(list(map(lambda x: isinstance(x, column_type), df[column_name]))):\n            not_match = next(filter(lambda x: not isinstance(x, column_type), df[column_name]))\n            raise PandasTypeError(f'expected type is \"{column_type}\", but \"{type(not_match)}\" is passed in column \"{column_name}\".')\n\n\nclass PandasTypeConfigMap(luigi.Config):\n    \"\"\"To initialize this class only once, this inherits luigi.Config.\"\"\"\n\n    def __init__(self, *args: Any, **kwargs: Any) -> None:\n        super().__init__(*args, **kwargs)\n        task_names = Register.task_names()\n        task_classes = [Register.get_task_cls(task_name) for task_name in task_names]\n        self._map = {\n            task_class.task_namespace: task_class for task_class in task_classes if issubclass(task_class, PandasTypeConfig) and task_class != PandasTypeConfig\n        }\n\n    def check(self, obj: Any, task_namespace: str) -> None:\n        if isinstance(obj, pd.DataFrame) and task_namespace in self._map:\n            self._map[task_namespace].check(obj)\n"
  },
  {
    "path": "gokart/parameter.py",
    "content": "from __future__ import annotations\n\nimport bz2\nimport datetime\nimport json\nimport sys\nfrom logging import getLogger\nfrom typing import Any, Generic, Protocol, TypeVar\n\nif sys.version_info >= (3, 11):\n    from typing import Unpack\nelse:\n    from typing_extensions import Unpack\nfrom warnings import warn\n\nimport luigi\nfrom luigi import task_register\n\ntry:\n    from luigi.parameter import _no_value, _NoValueType, _ParameterKwargs\nexcept ImportError:\n    _no_value = None  # type: ignore[assignment]\n    _NoValueType = type(None)  # type: ignore[assignment,misc]\n    _ParameterKwargs = dict  # type: ignore[assignment,misc]\n\nimport gokart\n\nlogger = getLogger(__name__)\n\n\nTASK_ON_KART_TYPE = TypeVar('TASK_ON_KART_TYPE', bound='gokart.TaskOnKart')  # type: ignore\n\n\nclass TaskInstanceParameter(luigi.Parameter[TASK_ON_KART_TYPE], Generic[TASK_ON_KART_TYPE]):\n    def __init__(\n        self,\n        expected_type: type[TASK_ON_KART_TYPE] | None = None,\n        default: TASK_ON_KART_TYPE | _NoValueType = _no_value,\n        **kwargs: Unpack[_ParameterKwargs],\n    ):\n        if expected_type is None:\n            self.expected_type: type = gokart.TaskOnKart\n        elif isinstance(expected_type, type):\n            self.expected_type = expected_type\n        else:\n            raise TypeError(f'expected_type must be a type, not {type(expected_type)}')\n        super().__init__(default=default, **kwargs)\n\n    @staticmethod\n    def _recursive(param_dict):\n        params = param_dict['params']\n        task_cls = task_register.Register.get_task_cls(param_dict['type'])\n        for key, value in task_cls.get_params():\n            if key in params:\n                params[key] = value.parse(params[key])\n        return task_cls(**params)\n\n    @staticmethod\n    def _recursive_decompress(s):\n        s = dict(luigi.DictParameter().parse(s))\n        if 'params' in s:\n            s['params'] = 
TaskInstanceParameter._recursive_decompress(bz2.decompress(bytes.fromhex(s['params'])).decode())\n        return s\n\n    def parse(self, s):\n        if isinstance(s, str):\n            s = self._recursive_decompress(s)\n        return self._recursive(s)\n\n    def serialize(self, x):\n        params = bz2.compress(json.dumps(x.to_str_params(only_significant=True)).encode()).hex()\n        values = dict(type=x.get_task_family(), params=params)\n        return luigi.DictParameter().serialize(values)\n\n    def _warn_on_wrong_param_type(self, param_name, param_value):\n        if not isinstance(param_value, self.expected_type):\n            raise TypeError(f'{param_value} is not an instance of {self.expected_type}')\n\n\nclass _TaskInstanceEncoder(json.JSONEncoder):\n    def default(self, obj):\n        if isinstance(obj, luigi.Task):\n            return TaskInstanceParameter().serialize(obj)\n        # Let the base class default method raise the TypeError\n        return json.JSONEncoder.default(self, obj)\n\n\nclass ListTaskInstanceParameter(luigi.Parameter[list[TASK_ON_KART_TYPE]], Generic[TASK_ON_KART_TYPE]):\n    def __init__(\n        self,\n        expected_elements_type: type[TASK_ON_KART_TYPE] | None = None,\n        default: list[TASK_ON_KART_TYPE] | _NoValueType = _no_value,\n        **kwargs: Unpack[_ParameterKwargs],\n    ):\n        if expected_elements_type is None:\n            self.expected_elements_type: type = gokart.TaskOnKart\n        elif isinstance(expected_elements_type, type):\n            self.expected_elements_type = expected_elements_type\n        else:\n            raise TypeError(f'expected_elements_type must be a type, not {type(expected_elements_type)}')\n        super().__init__(default=default, **kwargs)\n\n    def parse(self, s):\n        return [TaskInstanceParameter().parse(x) for x in list(json.loads(s))]\n\n    def serialize(self, x):\n        return json.dumps(x, cls=_TaskInstanceEncoder)\n\n    def 
_warn_on_wrong_param_type(self, param_name, param_value):\n        for v in param_value:\n            if not isinstance(v, self.expected_elements_type):\n                raise TypeError(f'{v} is not an instance of {self.expected_elements_type}')\n\n\nclass ExplicitBoolParameter(luigi.BoolParameter):\n    def __init__(self, *args, **kwargs):\n        luigi.Parameter.__init__(self, *args, **kwargs)\n\n    def _parser_kwargs(self, *args, **kwargs):  # type: ignore\n        return luigi.Parameter._parser_kwargs(*args, *kwargs)\n\n\nT = TypeVar('T')\n\n\nclass Serializable(Protocol):\n    def gokart_serialize(self) -> str:\n        \"\"\"Implement this method to serialize the object as a parameter.\n        You can omit some fields from results of serialization if you want to ignore changes of them\n        \"\"\"\n        ...\n\n    @classmethod\n    def gokart_deserialize(cls: type[T], s: str) -> T:\n        \"\"\"Implement this method to deserialize the object from a string\"\"\"\n        ...\n\n\nS = TypeVar('S', bound=Serializable)\n\n\nclass SerializableParameter(luigi.Parameter[S], Generic[S]):\n    def __init__(self, object_type: type[S], *args: Any, **kwargs: Any) -> None:\n        self._object_type = object_type\n        super().__init__(*args, **kwargs)\n\n    def parse(self, s: str) -> S:\n        return self._object_type.gokart_deserialize(s)\n\n    def serialize(self, x: S) -> str:\n        return x.gokart_serialize()\n\n\nclass ZonedDateSecondParameter(luigi.Parameter[datetime.datetime]):\n    \"\"\"\n    ZonedDateSecondParameter supports a datetime.datetime object with timezone information.\n\n    A ZonedDateSecondParameter is an `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted\n    date, time specified to the second and timezone. For example, ``2013-07-10T19:07:38+09:00`` specifies July 10, 2013 at\n    19:07:38 +09:00. 
The separator `:` can be omitted for Python3.11 and later.\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n    def parse(self, s):\n        # special character 'Z' is replaced with '+00:00'\n        # because Python 3.11 and later support fromisoformat with Z at the end of the string.\n        if s.endswith('Z'):\n            s = s[:-1] + '+00:00'\n        dt = datetime.datetime.fromisoformat(s)\n        if dt.tzinfo is None:\n            warn('The input does not have timezone information. Please consider using luigi.DateSecondParameter instead.', stacklevel=1)\n        return dt\n\n    def serialize(self, dt):\n        return dt.isoformat()\n\n    def normalize(self, dt):\n        # override _DatetimeParameterBase.normalize to avoid do nothing to normalize except removing microsecond.\n        # microsecond is removed because the number of digits of microsecond is not fixed.\n        # See also luigi's implementation  https://github.com/spotify/luigi/blob/v3.6.0/luigi/parameter.py#L612\n        return dt.replace(microsecond=0)\n"
  },
  {
    "path": "gokart/py.typed",
    "content": ""
  },
  {
    "path": "gokart/required_task_output.py",
    "content": "from dataclasses import dataclass\n\n\n@dataclass\nclass RequiredTaskOutput:\n    task_name: str\n    output_path: str\n\n    def serialize(self) -> dict[str, str]:\n        return {'__gokart_task_name': self.task_name, '__gokart_output_path': self.output_path}\n"
  },
  {
    "path": "gokart/run.py",
    "content": "from __future__ import annotations\n\nimport logging\nimport os\nimport sys\nfrom logging import getLogger\nfrom typing import Any\n\nimport luigi\nimport luigi.cmdline\nimport luigi.cmdline_parser\nimport luigi.execution_summary\nimport luigi.interface\nimport luigi.retcodes\nimport luigi.setup_logging\nfrom luigi.cmdline_parser import CmdlineParser\n\nimport gokart\nimport gokart.slack\nfrom gokart.build import WorkerSchedulerFactory\nfrom gokart.object_storage import ObjectStorage\n\nlogger = getLogger(__name__)\n\n\ndef _run_tree_info(cmdline_args, details):\n    with CmdlineParser.global_instance(cmdline_args) as cp:\n        gokart.tree_info().output().dump(gokart.make_tree_info(cp.get_task_obj(), details=details))\n\n\ndef _try_tree_info(cmdline_args):\n    with CmdlineParser.global_instance(cmdline_args):\n        mode = gokart.tree_info().mode\n        output_path = gokart.tree_info().output().path()\n\n    # do nothing if `mode` is empty.\n    if mode == '':\n        return\n\n    # output tree info and exit.\n    if mode == 'simple':\n        _run_tree_info(cmdline_args, details=False)\n    elif mode == 'all':\n        _run_tree_info(cmdline_args, details=True)\n    else:\n        raise ValueError(f'--tree-info-mode must be \"simple\" or \"all\", but \"{mode}\" is passed.')\n    logger.info(f'output tree info: {output_path}')\n    sys.exit()\n\n\ndef _try_to_delete_unnecessary_output_file(cmdline_args: list[str]) -> None:\n    with CmdlineParser.global_instance(cmdline_args) as cp:\n        task: gokart.TaskOnKart[Any] = cp.get_task_obj()\n        if task.delete_unnecessary_output_files:\n            if ObjectStorage.if_object_storage_path(task.workspace_directory):\n                logger.info('delete-unnecessary-output-files is not support s3/gcs.')\n            else:\n                gokart.delete_local_unnecessary_outputs(task)\n            sys.exit()\n\n\ndef _try_get_slack_api(cmdline_args: list[str]) -> gokart.slack.SlackAPI | 
None:\n    with CmdlineParser.global_instance(cmdline_args):\n        config = gokart.slack.SlackConfig()\n        token = os.getenv(config.token_name, '')\n        channel = config.channel\n        to_user = config.to_user\n        if token and channel:\n            logger.info('Slack notification is activated.')\n            return gokart.slack.SlackAPI(token=token, channel=channel, to_user=to_user)\n    logger.info('Slack notification is not activated.')\n    return None\n\n\ndef _try_to_send_event_summary_to_slack(\n    slack_api: gokart.slack.SlackAPI | None, event_aggregator: gokart.slack.EventAggregator, cmdline_args: list[str]\n) -> None:\n    if slack_api is None:\n        # do nothing\n        return\n    options = gokart.slack.SlackConfig()\n    with CmdlineParser.global_instance(cmdline_args) as cp:\n        task = cp.get_task_obj()\n        tree_info = gokart.make_tree_info(task, details=True) if options.send_tree_info else 'Please add SlackConfig.send_tree_info to include tree-info'\n        task_name = type(task).__name__\n\n    comment = f'Report of {task_name}' + os.linesep + event_aggregator.get_summary()\n    content = os.linesep.join(['===== Event List ====', event_aggregator.get_event_list(), os.linesep, '==== Tree Info ====', tree_info])\n    slack_api.send_snippet(comment=comment, title='event.txt', content=content)\n\n\ndef _run_with_retcodes(argv):\n    \"\"\"run_with_retcodes equivalent that uses gokart's WorkerSchedulerFactory.\"\"\"\n    retcode_logger = logging.getLogger('luigi-interface')\n    with luigi.cmdline_parser.CmdlineParser.global_instance(argv):\n        retcodes = luigi.retcodes.retcode()\n\n    worker = None\n    try:\n        worker = luigi.interface._run(argv, worker_scheduler_factory=WorkerSchedulerFactory()).worker\n    except luigi.interface.PidLockAlreadyTakenExit:\n        sys.exit(retcodes.already_running)\n    except Exception:\n        env_params = luigi.interface.core()\n        
luigi.setup_logging.InterfaceLogging.setup(env_params)\n        retcode_logger.exception('Uncaught exception in luigi')\n        sys.exit(retcodes.unhandled_exception)\n\n    with luigi.cmdline_parser.CmdlineParser.global_instance(argv):\n        task_sets = luigi.execution_summary._summary_dict(worker)\n        root_task = luigi.execution_summary._root_task(worker)\n        non_empty_categories = {k: v for k, v in task_sets.items() if v}.keys()\n\n    def has(status):\n        assert status in luigi.execution_summary._ORDERED_STATUSES\n        return status in non_empty_categories\n\n    codes_and_conds = (\n        (retcodes.missing_data, has('still_pending_ext')),\n        (retcodes.task_failed, has('failed')),\n        (retcodes.already_running, has('run_by_other_worker')),\n        (retcodes.scheduling_error, has('scheduling_error')),\n        (retcodes.not_run, has('not_run')),\n    )\n    expected_ret_code = max(code * (1 if cond else 0) for code, cond in codes_and_conds)\n\n    if expected_ret_code == 0 and root_task not in task_sets['completed'] and root_task not in task_sets['already_done']:\n        sys.exit(retcodes.not_run)\n    else:\n        sys.exit(expected_ret_code)\n\n\ndef run(cmdline_args=None, set_retcode=True):\n    cmdline_args = cmdline_args or sys.argv[1:]\n\n    if set_retcode:\n        luigi.retcodes.retcode.already_running = 10  # type: ignore\n        luigi.retcodes.retcode.missing_data = 20  # type: ignore\n        luigi.retcodes.retcode.not_run = 30  # type: ignore\n        luigi.retcodes.retcode.task_failed = 40  # type: ignore\n        luigi.retcodes.retcode.scheduling_error = 50  # type: ignore\n\n    _try_tree_info(cmdline_args)\n    _try_to_delete_unnecessary_output_file(cmdline_args)\n    gokart.testing.try_to_run_test_for_empty_data_frame(cmdline_args)\n\n    slack_api = _try_get_slack_api(cmdline_args)\n    event_aggregator = gokart.slack.EventAggregator()\n    try:\n        event_aggregator.set_handlers()\n        
_run_with_retcodes(cmdline_args)\n    except SystemExit as e:\n        _try_to_send_event_summary_to_slack(slack_api, event_aggregator, cmdline_args)\n        sys.exit(e.code)\n"
  },
  {
    "path": "gokart/s3_config.py",
    "content": "from __future__ import annotations\n\nimport os\n\nimport luigi\nimport luigi.contrib.s3\n\n\nclass S3Config(luigi.Config):\n    aws_access_key_id_name = luigi.Parameter(default='AWS_ACCESS_KEY_ID', description='AWS access key id environment variable.')\n    aws_secret_access_key_name = luigi.Parameter(default='AWS_SECRET_ACCESS_KEY', description='AWS secret access key environment variable.')\n\n    _client = None\n\n    def get_s3_client(self) -> luigi.contrib.s3.S3Client:\n        if self._client is None:  # use cache as like singleton object\n            self._client = self._get_s3_client()\n        return self._client\n\n    def _get_s3_client(self) -> luigi.contrib.s3.S3Client:\n        return luigi.contrib.s3.S3Client(\n            aws_access_key_id=os.environ.get(self.aws_access_key_id_name), aws_secret_access_key=os.environ.get(self.aws_secret_access_key_name)\n        )\n"
  },
  {
    "path": "gokart/s3_zip_client.py",
    "content": "from __future__ import annotations\n\nimport os\nimport shutil\nfrom typing import cast\n\nfrom gokart.s3_config import S3Config\nfrom gokart.zip_client import ZipClient, _unzip_file\n\n\nclass S3ZipClient(ZipClient):\n    def __init__(self, file_path: str, temporary_directory: str) -> None:\n        self._file_path = file_path\n        self._temporary_directory = temporary_directory\n        self._client = S3Config().get_s3_client()\n\n    def exists(self) -> bool:\n        return cast(bool, self._client.exists(self._file_path))\n\n    def make_archive(self) -> None:\n        extension = os.path.splitext(self._file_path)[1]\n        if not os.path.exists(self._temporary_directory):\n            # Check path existence since shutil.make_archive() of python 3.10+ does not check it.\n            raise FileNotFoundError(f'Temporary directory {self._temporary_directory} is not found.')\n        shutil.make_archive(base_name=self._temporary_directory, format=extension[1:], root_dir=self._temporary_directory)\n        self._client.put(self._temporary_file_path(), self._file_path)\n\n    def unpack_archive(self) -> None:\n        os.makedirs(self._temporary_directory, exist_ok=True)\n        self._client.get(self._file_path, self._temporary_file_path())\n        _unzip_file(fp=self._temporary_file_path(), extract_dir=self._temporary_directory)\n\n    def remove(self) -> None:\n        self._client.remove(self._file_path)\n\n    @property\n    def path(self) -> str:\n        return self._file_path\n\n    def _temporary_file_path(self):\n        extension = os.path.splitext(self._file_path)[1]\n        base_name = self._temporary_directory\n        if base_name.endswith('/'):\n            base_name = base_name[:-1]\n        return base_name + extension\n"
  },
  {
    "path": "gokart/slack/__init__.py",
    "content": "from gokart.slack.event_aggregator import EventAggregator\nfrom gokart.slack.slack_api import SlackAPI\nfrom gokart.slack.slack_config import SlackConfig\n\nfrom .slack_api import ChannelListNotLoadedError, ChannelNotFoundError, FileNotUploadedError\n\n__all__ = [\n    'ChannelListNotLoadedError',\n    'ChannelNotFoundError',\n    'FileNotUploadedError',\n    'EventAggregator',\n    'SlackAPI',\n    'SlackConfig',\n]\n"
  },
  {
    "path": "gokart/slack/event_aggregator.py",
    "content": "from __future__ import annotations\n\nimport os\nfrom logging import getLogger\nfrom typing import Any, TypedDict\n\nimport luigi\n\nlogger = getLogger(__name__)\n\n\nclass FailureEvent(TypedDict):\n    task: str\n    exception: str\n\n\nclass EventAggregator:\n    def __init__(self) -> None:\n        self._success_events: list[str] = []\n        self._failure_events: list[FailureEvent] = []\n\n    def set_handlers(self):\n        handlers = [(luigi.Event.SUCCESS, self._success), (luigi.Event.FAILURE, self._failure)]\n        for event, handler in handlers:\n            luigi.Task.event_handler(event)(handler)\n\n    def get_summary(self) -> str:\n        return f'Success: {len(self._success_events)}; Failure: {len(self._failure_events)}'\n\n    def get_event_list(self) -> str:\n        message = ''\n        if len(self._failure_events) != 0:\n            failure_message = os.linesep.join([f'Task: {failure[\"task\"]}; Exception: {failure[\"exception\"]}' for failure in self._failure_events])\n            message += '---- Failure Tasks ----' + os.linesep + failure_message\n        if len(self._success_events) != 0:\n            success_message = os.linesep.join(self._success_events)\n            message += '---- Success Tasks ----' + os.linesep + success_message\n        if message == '':\n            message = 'Tasks were not executed.'\n        return message\n\n    def _success(self, task):\n        self._success_events.append(self._task_to_str(task))\n\n    def _failure(self, task, exception):\n        failure: FailureEvent = {'task': self._task_to_str(task), 'exception': str(exception)}\n        self._failure_events.append(failure)\n\n    @staticmethod\n    def _task_to_str(task: Any) -> str:\n        return f'{type(task).__name__}:[{task.make_unique_id()}]'\n"
  },
  {
    "path": "gokart/slack/slack_api.py",
    "content": "from __future__ import annotations\n\nfrom logging import getLogger\n\nimport slack_sdk\n\nlogger = getLogger(__name__)\n\n\nclass ChannelListNotLoadedError(RuntimeError):\n    pass\n\n\nclass ChannelNotFoundError(RuntimeError):\n    pass\n\n\nclass FileNotUploadedError(RuntimeError):\n    pass\n\n\nclass SlackAPI:\n    def __init__(self, token: str, channel: str, to_user: str) -> None:\n        self._client = slack_sdk.WebClient(token=token)\n        self._channel_id = self._get_channel_id(channel)\n        self._to_user = to_user if to_user == '' or to_user.startswith('@') else '@' + to_user\n\n    def _get_channel_id(self, channel_name):\n        params = {'exclude_archived': True, 'limit': 100}\n        try:\n            for channels in self._client.conversations_list(params=params):\n                if not channels:\n                    raise ChannelListNotLoadedError('Channel list is empty.')\n                for channel in channels.get('channels', []):\n                    if channel['name'] == channel_name:\n                        return channel['id']\n            raise ChannelNotFoundError(f'Channel {channel_name} is not found in public channels.')\n        except Exception as e:\n            logger.warning(f'The job will start without slack notification: {e}')\n\n    def send_snippet(self, comment, title, content):\n        try:\n            request_body = dict(\n                channels=self._channel_id, initial_comment=f'<{self._to_user}> {comment}' if self._to_user else comment, content=content, title=title\n            )\n            response = self._client.api_call('files.upload', data=request_body)\n            if not response['ok']:\n                raise FileNotUploadedError(f'Error while uploading file. The error reason is \"{response[\"error\"]}\".')\n        except Exception as e:\n            logger.warning(f'Failed to send slack notification: {e}')\n"
  },
  {
    "path": "gokart/slack/slack_config.py",
    "content": "from __future__ import annotations\n\nimport luigi\n\n\nclass SlackConfig(luigi.Config):\n    token_name = luigi.Parameter(default='SLACK_TOKEN', description='slack token environment variable.')\n    channel = luigi.Parameter(default='', significant=False, description='channel name for notification.')\n    to_user = luigi.Parameter(default='', significant=False, description='Optional; user name who is supposed to be mentioned.')\n    send_tree_info = luigi.BoolParameter(\n        default=False,\n        significant=False,\n        description='When this option is true, the dependency tree of tasks is included in send message.'\n        'It is recommended to set false to this option when notification takes long time.',\n    )\n"
  },
  {
    "path": "gokart/target.py",
    "content": "from __future__ import annotations\n\nimport hashlib\nimport os\nimport shutil\nfrom abc import abstractmethod\nfrom datetime import datetime\nfrom glob import glob\nfrom logging import getLogger\nfrom typing import Any, cast\n\nimport luigi\nimport numpy as np\nimport pandas as pd\n\nfrom gokart.conflict_prevention_lock.task_lock import TaskLockParams, make_task_lock_params\nfrom gokart.conflict_prevention_lock.task_lock_wrappers import wrap_dump_with_lock, wrap_load_with_lock, wrap_remove_with_lock\nfrom gokart.file_processor import FileProcessor, make_file_processor\nfrom gokart.gcs_obj_metadata_client import GCSObjectMetadataClient\nfrom gokart.object_storage import ObjectStorage\nfrom gokart.required_task_output import RequiredTaskOutput\nfrom gokart.utils import FlattenableItems\nfrom gokart.zip_client_util import make_zip_client\n\nlogger = getLogger(__name__)\n\n\nclass TargetOnKart(luigi.Target):\n    def exists(self) -> bool:\n        return self._exists()\n\n    def load(self) -> Any:\n        return wrap_load_with_lock(func=self._load, task_lock_params=self._get_task_lock_params())()\n\n    def dump(\n        self,\n        obj: Any,\n        lock_at_dump: bool = True,\n        task_params: dict[str, str] | None = None,\n        custom_labels: dict[str, str] | None = None,\n        required_task_outputs: FlattenableItems[RequiredTaskOutput] | None = None,\n    ) -> None:\n        if lock_at_dump:\n            wrap_dump_with_lock(func=self._dump, task_lock_params=self._get_task_lock_params(), exist_check=self.exists)(\n                obj=obj,\n                task_params=task_params,\n                custom_labels=custom_labels,\n                required_task_outputs=required_task_outputs,\n            )\n        else:\n            self._dump(obj=obj, task_params=task_params, custom_labels=custom_labels, required_task_outputs=required_task_outputs)\n\n    def remove(self) -> None:\n        if self.exists():\n            
wrap_remove_with_lock(self._remove, task_lock_params=self._get_task_lock_params())()\n\n    def last_modification_time(self) -> datetime:\n        return self._last_modification_time()\n\n    def path(self) -> str:\n        return self._path()\n\n    @abstractmethod\n    def _exists(self) -> bool:\n        pass\n\n    @abstractmethod\n    def _get_task_lock_params(self) -> TaskLockParams:\n        pass\n\n    @abstractmethod\n    def _load(self) -> Any:\n        pass\n\n    @abstractmethod\n    def _dump(\n        self,\n        obj: Any,\n        task_params: dict[str, str] | None = None,\n        custom_labels: dict[str, str] | None = None,\n        required_task_outputs: FlattenableItems[RequiredTaskOutput] | None = None,\n    ) -> None:\n        pass\n\n    @abstractmethod\n    def _remove(self) -> None:\n        pass\n\n    @abstractmethod\n    def _last_modification_time(self) -> datetime:\n        pass\n\n    @abstractmethod\n    def _path(self) -> str:\n        pass\n\n\nclass SingleFileTarget(TargetOnKart):\n    def __init__(\n        self,\n        target: luigi.target.FileSystemTarget,\n        processor: FileProcessor,\n        task_lock_params: TaskLockParams,\n    ) -> None:\n        self._target = target\n        self._processor = processor\n        self._task_lock_params = task_lock_params\n\n    def _exists(self) -> bool:\n        return cast(bool, self._target.exists())\n\n    def _get_task_lock_params(self) -> TaskLockParams:\n        return self._task_lock_params\n\n    def _load(self) -> Any:\n        with self._target.open('r') as f:\n            return self._processor.load(f)\n\n    def _dump(\n        self,\n        obj: Any,\n        task_params: dict[str, str] | None = None,\n        custom_labels: dict[str, str] | None = None,\n        required_task_outputs: FlattenableItems[RequiredTaskOutput] | None = None,\n    ) -> None:\n        with self._target.open('w') as f:\n            self._processor.dump(obj, f)\n        if 
self.path().startswith('gs://'):\n            GCSObjectMetadataClient.add_task_state_labels(\n                path=self.path(), task_params=task_params, custom_labels=custom_labels, required_task_outputs=required_task_outputs\n            )\n\n    def _remove(self) -> None:\n        self._target.remove()\n\n    def _last_modification_time(self) -> datetime:\n        return _get_last_modification_time(self._target.path)\n\n    def _path(self) -> str:\n        return self._target.path\n\n\nclass ModelTarget(TargetOnKart):\n    def __init__(\n        self,\n        file_path: str,\n        temporary_directory: str,\n        load_function: Any,\n        save_function: Any,\n        task_lock_params: TaskLockParams,\n    ) -> None:\n        self._zip_client = make_zip_client(file_path, temporary_directory)\n        self._temporary_directory = temporary_directory\n        self._save_function = save_function\n        self._load_function = load_function\n        self._task_lock_params = task_lock_params\n\n    def _exists(self) -> bool:\n        return self._zip_client.exists()\n\n    def _get_task_lock_params(self) -> TaskLockParams:\n        return self._task_lock_params\n\n    def _load(self) -> Any:\n        self._zip_client.unpack_archive()\n        self._load_function = self._load_function or make_target(self._load_function_path()).load()\n        model = self._load_function(self._model_path())\n        self._remove_temporary_directory()\n        return model\n\n    def _dump(\n        self,\n        obj: Any,\n        task_params: dict[str, str] | None = None,\n        custom_labels: dict[str, str] | None = None,\n        required_task_outputs: FlattenableItems[RequiredTaskOutput] | None = None,\n    ) -> None:\n        self._make_temporary_directory()\n        self._save_function(obj, self._model_path())\n        make_target(self._load_function_path()).dump(\n            self._load_function, task_params=task_params, custom_labels=custom_labels, 
required_task_outputs=required_task_outputs\n        )\n        self._zip_client.make_archive()\n        self._remove_temporary_directory()\n\n    def _remove(self) -> None:\n        self._zip_client.remove()\n\n    def _last_modification_time(self) -> datetime:\n        return _get_last_modification_time(self._zip_client.path)\n\n    def _path(self) -> str:\n        return self._zip_client.path\n\n    def _model_path(self):\n        return os.path.join(self._temporary_directory, 'model.pkl')\n\n    def _load_function_path(self):\n        return os.path.join(self._temporary_directory, 'load_function.pkl')\n\n    def _remove_temporary_directory(self):\n        shutil.rmtree(self._temporary_directory)\n\n    def _make_temporary_directory(self):\n        os.makedirs(self._temporary_directory, exist_ok=True)\n\n\nclass LargeDataFrameProcessor:\n    def __init__(self, max_byte: int):\n        self.max_byte = int(max_byte)\n\n    def save(self, df: pd.DataFrame, file_path: str) -> None:\n        dir_path = os.path.dirname(file_path)\n        os.makedirs(dir_path, exist_ok=True)\n\n        if df.empty:\n            df.to_pickle(os.path.join(dir_path, 'data_0.pkl'))\n            return\n\n        split_size = df.values.nbytes // self.max_byte + 1\n        logger.info(f'saving a large pdDataFrame with split_size={split_size}')\n        for i, idx in list(enumerate(np.array_split(range(df.shape[0]), split_size))):\n            df.iloc[idx[0] : idx[-1] + 1].to_pickle(os.path.join(dir_path, f'data_{i}.pkl'))\n\n    @staticmethod\n    def load(file_path: str) -> pd.DataFrame:\n        dir_path = os.path.dirname(file_path)\n\n        return pd.concat([pd.read_pickle(file_path) for file_path in glob(os.path.join(dir_path, 'data_*.pkl'))])\n\n\ndef _make_file_system_target(file_path: str, processor: FileProcessor | None = None, store_index_in_feather: bool = True) -> luigi.target.FileSystemTarget:\n    processor = processor or make_file_processor(file_path, 
store_index_in_feather=store_index_in_feather)\n    if ObjectStorage.if_object_storage_path(file_path):\n        return ObjectStorage.get_object_storage_target(file_path, processor.format())\n    return luigi.LocalTarget(file_path, format=processor.format())\n\n\ndef _make_file_path(original_path: str, unique_id: str | None = None) -> str:\n    if unique_id is not None:\n        [base, extension] = os.path.splitext(original_path)\n        return base + '_' + unique_id + extension\n    return original_path\n\n\ndef _get_last_modification_time(path: str) -> datetime:\n    if ObjectStorage.if_object_storage_path(path):\n        if ObjectStorage.exists(path):\n            return ObjectStorage.get_timestamp(path)\n        raise FileNotFoundError(f'No such file or directory: {path}')\n    return datetime.fromtimestamp(os.path.getmtime(path))\n\n\ndef make_target(\n    file_path: str,\n    unique_id: str | None = None,\n    processor: FileProcessor | None = None,\n    task_lock_params: TaskLockParams | None = None,\n    store_index_in_feather: bool = True,\n) -> TargetOnKart:\n    _task_lock_params = task_lock_params if task_lock_params is not None else make_task_lock_params(file_path=file_path, unique_id=unique_id)\n    file_path = _make_file_path(file_path, unique_id)\n    processor = processor or make_file_processor(file_path, store_index_in_feather=store_index_in_feather)\n    file_system_target = _make_file_system_target(file_path, processor=processor, store_index_in_feather=store_index_in_feather)\n    return SingleFileTarget(target=file_system_target, processor=processor, task_lock_params=_task_lock_params)\n\n\ndef make_model_target(\n    file_path: str,\n    temporary_directory: str,\n    save_function: Any,\n    load_function: Any,\n    unique_id: str | None = None,\n    task_lock_params: TaskLockParams | None = None,\n) -> TargetOnKart:\n    _task_lock_params = task_lock_params if task_lock_params is not None else make_task_lock_params(file_path=file_path, 
unique_id=unique_id)\n    file_path = _make_file_path(file_path, unique_id)\n    temporary_directory = os.path.join(temporary_directory, hashlib.md5(file_path.encode()).hexdigest())\n    return ModelTarget(\n        file_path=file_path,\n        temporary_directory=temporary_directory,\n        save_function=save_function,\n        load_function=load_function,\n        task_lock_params=_task_lock_params,\n    )\n"
  },
  {
    "path": "gokart/task.py",
    "content": "from __future__ import annotations\n\nimport functools\nimport hashlib\nimport inspect\nimport os\nimport random\nimport types\nfrom collections.abc import Callable, Generator, Iterable\nfrom importlib import import_module\nfrom logging import getLogger\nfrom typing import Any, Generic, TypeVar, cast, overload\n\nimport luigi\nimport pandas as pd\nfrom luigi.parameter import ParameterVisibility\n\nimport gokart\nimport gokart.target\nfrom gokart.conflict_prevention_lock.task_lock import make_task_lock_params, make_task_lock_params_for_run\nfrom gokart.conflict_prevention_lock.task_lock_wrappers import wrap_run_with_lock\nfrom gokart.file_processor import FileProcessor, make_file_processor\nfrom gokart.pandas_type_config import PandasTypeConfigMap\nfrom gokart.parameter import ExplicitBoolParameter, ListTaskInstanceParameter, TaskInstanceParameter\nfrom gokart.required_task_output import RequiredTaskOutput\nfrom gokart.target import TargetOnKart\nfrom gokart.task_complete_check import task_complete_check_wrapper\nfrom gokart.utils import FlattenableItems, flatten, get_dataframe_type_from_task, map_flattenable_items\n\nlogger = getLogger(__name__)\n\n\nT = TypeVar('T')\nK = TypeVar('K')\n\n\n# NOTE: inherited from AssertionError for backward compatibility (Formerly, Gokart raises that exception when a task dumps an empty DataFrame).\nclass EmptyDumpError(AssertionError):\n    \"\"\"Raised when the task attempts to dump an empty DataFrame even though it is prohibited (``fail_on_empty_dump`` is set to True)\"\"\"\n\n\nclass TaskOnKart(luigi.Task, Generic[T]):\n    \"\"\"\n    This is a wrapper class of luigi.Task.\n\n    The key methods of a TaskOnKart are:\n\n    * :py:meth:`make_target` - this makes output target with a relative file path.\n    * :py:meth:`make_model_target` - this makes output target for models which generate multiple files to save.\n    * :py:meth:`load` - this loads input files of this task.\n    * :py:meth:`dump` - this save a 
object as output of this task.\n    \"\"\"\n\n    workspace_directory: luigi.Parameter[str] = luigi.Parameter(\n        default='./resources/', description='A directory to set outputs on. Please use a path starts with s3:// when you use s3.', significant=False\n    )\n    local_temporary_directory: luigi.Parameter[str] = luigi.Parameter(\n        default='./resources/tmp/', description='A directory to save temporary files.', significant=False\n    )\n    rerun: luigi.BoolParameter = luigi.BoolParameter(\n        default=False, description='If this is true, this task will run even if all output files exist.', significant=False\n    )\n    strict_check: luigi.BoolParameter = luigi.BoolParameter(\n        default=False, description='If this is true, this task will not run only if all input and output files exist.', significant=False\n    )\n    modification_time_check: luigi.BoolParameter = luigi.BoolParameter(\n        default=False,\n        description='If this is true, this task will not run only if all input and output files exist,'\n        ' and all input files are modified before output file are modified.',\n        significant=False,\n    )\n    serialized_task_definition_check: luigi.BoolParameter = luigi.BoolParameter(\n        default=False,\n        description='If this is true, even if all outputs are present,this task will be executed if any changes have been made to the code.',\n        significant=False,\n    )\n    delete_unnecessary_output_files: luigi.BoolParameter = luigi.BoolParameter(\n        default=False, description='If this is true, delete unnecessary output files.', significant=False\n    )\n    significant: luigi.BoolParameter = luigi.BoolParameter(\n        default=True, description='If this is false, this task is not treated as a part of dependent tasks for the unique id.', significant=False\n    )\n    fix_random_seed_methods: luigi.Parameter[tuple[str, ...]] = luigi.ListParameter(\n        default=('random.seed', 'numpy.random.seed'), 
description='Fix random seed method list.', significant=False\n    )\n    FIX_RANDOM_SEED_VALUE_NONE_MAGIC_NUMBER = -42497368\n    fix_random_seed_value: luigi.Parameter[int] = luigi.IntParameter(\n        default=FIX_RANDOM_SEED_VALUE_NONE_MAGIC_NUMBER, description='Fix random seed method value.', significant=False\n    )  # FIXME: should fix with OptionalIntParameter after newer luigi (https://github.com/spotify/luigi/pull/3079) will be released\n\n    redis_host: luigi.Parameter[str | None] = luigi.OptionalParameter(default=None, description='Task lock check is deactivated, when None.', significant=False)\n    redis_port: luigi.OptionalIntParameter = luigi.OptionalIntParameter(\n        default=None,  # type: ignore\n        description='Task lock check is deactivated, when None.',\n        significant=False,\n    )\n    redis_timeout: luigi.IntParameter = luigi.IntParameter(\n        default=180, description='Redis lock will be released after `redis_timeout` seconds', significant=False\n    )\n\n    fail_on_empty_dump: luigi.Parameter[bool] = ExplicitBoolParameter(default=False, description='Fail when task dumps empty DF', significant=False)\n    store_index_in_feather: luigi.Parameter[bool] = ExplicitBoolParameter(\n        default=True, description='Wether to store index when using feather as a output object.', significant=False\n    )\n\n    cache_unique_id: luigi.Parameter[bool] = ExplicitBoolParameter(default=True, description='Cache unique id during runtime', significant=False)\n    should_dump_supplementary_log_files: luigi.Parameter[bool] = ExplicitBoolParameter(\n        default=True,\n        description='Whether to dump supplementary files (task_log, random_seed, task_params, processing_time, module_versions) or not. \\\n         Note that when set to False, task_info functions (e.g. 
gokart.tree.task_info.make_task_info_as_tree_str()) cannot be used.',\n        significant=False,\n    )\n    complete_check_at_run: luigi.Parameter[bool] = ExplicitBoolParameter(\n        default=True, description='Check if output file exists at run. If exists, run() will be skipped.', significant=False\n    )\n    should_lock_run: luigi.Parameter[bool] = ExplicitBoolParameter(\n        default=False, significant=False, description='Whether to use redis lock or not at task run.'\n    )\n\n    @property\n    def priority(self):\n        return random.Random().random()  # seed is fixed, so we need to use random.Random().random() instead f random.random()\n\n    def __init__(self, *args, **kwargs):\n        self._add_configuration(kwargs, 'TaskOnKart')\n        # 'This parameter is dumped into \"workspace_directory/log/task_log/\" when this task finishes with success.'\n        self.task_log = dict()\n        self.task_unique_id = None\n        super().__init__(*args, **kwargs)\n        self._rerun_state = self.rerun\n        self._lock_at_dump = True\n\n        # Cache to_str_params to avoid slow task creation in a deep task tree.\n        # For example, gokart.build(RecursiveTask(dep=RecursiveTask(dep=RecursiveTask(dep=HelloWorldTask())))) results in O(n^2) calls to to_str_params.\n        # However, @lru_cache cannot be used as a decorator because luigi.Task employs metaclass tricks.\n        self.to_str_params = functools.lru_cache(maxsize=None)(self.to_str_params)  # type: ignore[method-assign]\n\n        if self.complete_check_at_run:\n            self.run = task_complete_check_wrapper(run_func=self.run, complete_check_func=self.complete)  # type: ignore\n\n        if self.should_lock_run:\n            self._lock_at_dump = False\n            assert self.redis_host is not None, 'redis_host must be set when should_lock_run is True.'\n            assert self.redis_port is not None, 'redis_port must be set when should_lock_run is True.'\n            
task_lock_params = make_task_lock_params_for_run(task_self=self)\n            self.run = wrap_run_with_lock(run_func=self.run, task_lock_params=task_lock_params)  # type: ignore\n\n    def input(self) -> FlattenableItems[TargetOnKart]:\n        return cast(FlattenableItems[TargetOnKart], super().input())\n\n    def output(self) -> FlattenableItems[TargetOnKart]:\n        return self.make_target()\n\n    def requires(self) -> FlattenableItems[TaskOnKart[Any]]:\n        tasks = self.make_task_instance_dictionary()\n        return tasks or []  # when tasks is empty dict, then this returns empty list.\n\n    def make_task_instance_dictionary(self) -> dict[str, TaskOnKart[Any]]:\n        return {key: var for key, var in vars(self).items() if self.is_task_on_kart(var)}\n\n    @staticmethod\n    def is_task_on_kart(value):\n        return isinstance(value, TaskOnKart) or (isinstance(value, tuple) and bool(value) and all([isinstance(v, TaskOnKart) for v in value]))\n\n    @classmethod\n    def _add_configuration(cls, kwargs, section):\n        config = luigi.configuration.get_config()\n        class_variables = dict(TaskOnKart.__dict__)\n        class_variables.update(dict(cls.__dict__))\n        if section not in config:\n            return\n        for key, value in dict(config[section]).items():\n            if key not in kwargs and key in class_variables:\n                kwargs[key] = class_variables[key].parse(value)\n\n    def complete(self) -> bool:\n        if self._rerun_state:\n            for target in flatten(self.output()):\n                target.remove()\n            self._rerun_state = False\n            return False\n\n        is_completed = all([t.exists() for t in flatten(self.output())])\n\n        if self.strict_check or self.modification_time_check:\n            requirements = flatten(self.requires())\n            inputs = flatten(self.input())\n            is_completed = is_completed and all([task.complete() for task in requirements]) and 
all([i.exists() for i in inputs])\n\n        if not self.modification_time_check or not is_completed or not self.input():\n            return is_completed\n\n        return self._check_modification_time()\n\n    def _check_modification_time(self) -> bool:\n        common_path = set(t.path() for t in flatten(self.input())) & set(t.path() for t in flatten(self.output()))\n        input_tasks = [t for t in flatten(self.input()) if t.path() not in common_path]\n        output_tasks = [t for t in flatten(self.output()) if t.path() not in common_path]\n\n        input_modification_time = max([target.last_modification_time() for target in input_tasks]) if input_tasks else None\n        output_modification_time = min([target.last_modification_time() for target in output_tasks]) if output_tasks else None\n\n        if input_modification_time is None or output_modification_time is None:\n            return True\n\n        # \"=\" must be required in the following statements, because some tasks use input targets as output targets.\n        return input_modification_time <= output_modification_time\n\n    def clone(self, cls=None, **kwargs):\n        _SPECIAL_PARAMS = {'rerun', 'strict_check', 'modification_time_check'}\n        if cls is None:\n            cls = self.__class__\n\n        new_k = {}\n        for param_name, _ in cls.get_params():\n            if param_name in kwargs:\n                new_k[param_name] = kwargs[param_name]\n            elif hasattr(self, param_name) and (param_name not in _SPECIAL_PARAMS):\n                new_k[param_name] = getattr(self, param_name)\n\n        return cls(**new_k)\n\n    def make_target(self, relative_file_path: str | None = None, use_unique_id: bool = True, processor: FileProcessor | None = None) -> TargetOnKart:\n        formatted_relative_file_path = (\n            relative_file_path if relative_file_path is not None else os.path.join(self.__module__.replace('.', '/'), f'{type(self).__name__}.pkl')\n        )\n        
file_path = os.path.join(self.workspace_directory, formatted_relative_file_path)\n        unique_id = self.make_unique_id() if use_unique_id else None\n\n        # Auto-select processor based on type parameter if not provided\n        if processor is None and relative_file_path is not None:\n            processor = self._create_processor_for_dataframe_type(file_path)\n\n        task_lock_params = make_task_lock_params(\n            file_path=file_path,\n            unique_id=unique_id,\n            redis_host=self.redis_host,\n            redis_port=self.redis_port,\n            redis_timeout=self.redis_timeout,\n            raise_task_lock_exception_on_collision=False,\n        )\n\n        return gokart.target.make_target(\n            file_path=file_path, unique_id=unique_id, processor=processor, task_lock_params=task_lock_params, store_index_in_feather=self.store_index_in_feather\n        )\n\n    def _create_processor_for_dataframe_type(self, file_path: str) -> FileProcessor:\n        df_type = get_dataframe_type_from_task(self)\n        return make_file_processor(file_path, dataframe_type=df_type, store_index_in_feather=self.store_index_in_feather)\n\n    def make_large_data_frame_target(self, relative_file_path: str | None = None, use_unique_id: bool = True, max_byte: int = int(2**26)) -> TargetOnKart:\n        formatted_relative_file_path = (\n            relative_file_path if relative_file_path is not None else os.path.join(self.__module__.replace('.', '/'), f'{type(self).__name__}.zip')\n        )\n        file_path = os.path.join(self.workspace_directory, formatted_relative_file_path)\n        unique_id = self.make_unique_id() if use_unique_id else None\n        task_lock_params = make_task_lock_params(\n            file_path=file_path,\n            unique_id=unique_id,\n            redis_host=self.redis_host,\n            redis_port=self.redis_port,\n            redis_timeout=self.redis_timeout,\n            
raise_task_lock_exception_on_collision=False,\n        )\n\n        return gokart.target.make_model_target(\n            file_path=file_path,\n            temporary_directory=self.local_temporary_directory,\n            unique_id=unique_id,\n            save_function=gokart.target.LargeDataFrameProcessor(max_byte=max_byte).save,\n            load_function=gokart.target.LargeDataFrameProcessor.load,\n            task_lock_params=task_lock_params,\n        )\n\n    def make_model_target(\n        self, relative_file_path: str, save_function: Callable[[Any, str], None], load_function: Callable[[str], Any], use_unique_id: bool = True\n    ) -> TargetOnKart:\n        \"\"\"\n        Make target for models which generate multiple files in saving, e.g. gensim.Word2Vec, Tensorflow, and so on.\n\n        :param relative_file_path: A file path to save.\n        :param save_function: A function to save a model. This takes a model object and a file path.\n        :param load_function: A function to load a model. 
This takes a file path and returns a model object.\n        :param use_unique_id: If this is true, add an unique id to a file base name.\n        \"\"\"\n        file_path = os.path.join(self.workspace_directory, relative_file_path)\n        assert relative_file_path[-3:] == 'zip', f'extension must be zip, but {relative_file_path} is passed.'\n        unique_id = self.make_unique_id() if use_unique_id else None\n        task_lock_params = make_task_lock_params(\n            file_path=file_path,\n            unique_id=unique_id,\n            redis_host=self.redis_host,\n            redis_port=self.redis_port,\n            redis_timeout=self.redis_timeout,\n            raise_task_lock_exception_on_collision=False,\n        )\n\n        return gokart.target.make_model_target(\n            file_path=file_path,\n            temporary_directory=self.local_temporary_directory,\n            unique_id=unique_id,\n            save_function=save_function,\n            load_function=load_function,\n            task_lock_params=task_lock_params,\n        )\n\n    @overload\n    def load(self, target: None | str | TargetOnKart = None) -> Any: ...\n\n    @overload\n    def load(self, target: TaskOnKart[K]) -> K: ...\n\n    @overload\n    def load(self, target: list[TaskOnKart[K]]) -> list[K]: ...\n\n    def load(self, target: None | str | TargetOnKart | TaskOnKart[K] | list[TaskOnKart[K]] = None) -> Any:\n        def _load(targets):\n            if isinstance(targets, list) or isinstance(targets, tuple):\n                return [_load(t) for t in targets]\n            if isinstance(targets, dict):\n                return {k: _load(t) for k, t in targets.items()}\n            return targets.load()\n\n        return _load(self._get_input_targets(target))\n\n    @overload\n    def load_generator(self, target: None | str | TargetOnKart = None) -> Generator[Any, None, None]: ...\n\n    @overload\n    def load_generator(self, target: list[TaskOnKart[K]]) -> Generator[K, None, None]: 
...\n\n    def load_generator(self, target: None | str | TargetOnKart | list[TaskOnKart[K]] = None) -> Generator[Any, None, None]:\n        def _load(targets):\n            if isinstance(targets, list) or isinstance(targets, tuple):\n                for t in targets:\n                    yield from _load(t)\n            elif isinstance(targets, dict):\n                for k, t in targets.items():\n                    yield from {k: _load(t)}\n            else:\n                yield targets.load()\n\n        return cast(Generator[Any, None, None], _load(self._get_input_targets(target)))\n\n    @overload\n    def dump(self, obj: T, target: None = None, custom_labels: dict[Any, Any] | None = None) -> None: ...\n\n    @overload\n    def dump(self, obj: Any, target: str | TargetOnKart, custom_labels: dict[Any, Any] | None = None) -> None: ...\n\n    def dump(self, obj: Any, target: None | str | TargetOnKart = None, custom_labels: dict[str, Any] | None = None) -> None:\n        PandasTypeConfigMap().check(obj, task_namespace=self.task_namespace)\n        if self.fail_on_empty_dump:\n            if isinstance(obj, pd.DataFrame) and obj.empty:\n                raise EmptyDumpError()\n\n        required_task_outputs = map_flattenable_items(\n            lambda task: map_flattenable_items(lambda output: RequiredTaskOutput(task_name=task.get_task_family(), output_path=output.path()), task.output()),\n            self.requires(),\n        )\n\n        self._get_output_target(target).dump(\n            obj,\n            lock_at_dump=self._lock_at_dump,\n            task_params=super().to_str_params(only_significant=True, only_public=True),\n            custom_labels=custom_labels,\n            required_task_outputs=required_task_outputs,\n        )\n\n    @staticmethod\n    def get_code(target_class: Any) -> set[str]:\n        def has_sourcecode(obj):\n            return inspect.ismethod(obj) or inspect.isfunction(obj) or inspect.isframe(obj) or inspect.iscode(obj)\n\n        
return {inspect.getsource(t) for _, t in inspect.getmembers(target_class, has_sourcecode)}\n\n    def get_own_code(self):\n        gokart_codes = self.get_code(TaskOnKart)\n        own_codes = self.get_code(self)\n        return ''.join(sorted(list(own_codes - gokart_codes)))\n\n    def make_unique_id(self) -> str:\n        unique_id = self.task_unique_id or self._make_hash_id()\n        if self.cache_unique_id:\n            self.task_unique_id = unique_id\n        return unique_id\n\n    def _make_hash_id(self) -> str:\n        def _to_str_params(task):\n            if isinstance(task, TaskOnKart):\n                return str(task.make_unique_id()) if task.significant else None\n\n            if not isinstance(task, luigi.Task):\n                raise ValueError(f'Task.requires method returns {type(task)}. You should return luigi.Task.')\n\n            return task.to_str_params(only_significant=True)\n\n        dependencies = [_to_str_params(task) for task in flatten(self.requires())]\n        dependencies = [d for d in dependencies if d is not None]\n        dependencies.append(self.to_str_params(only_significant=True))\n        dependencies.append(self.__class__.__name__)\n        if self.serialized_task_definition_check:\n            dependencies.append(self.get_own_code())\n        return hashlib.md5(str(dependencies).encode()).hexdigest()\n\n    def _get_input_targets(self, target: None | str | TargetOnKart | TaskOnKart[Any] | list[TaskOnKart[Any]]) -> FlattenableItems[TargetOnKart]:\n        if target is None:\n            return self.input()\n        if isinstance(target, str):\n            input = self.input()\n            assert isinstance(input, dict), f'input must be dict[str, TargetOnKart], but {type(input)} is passed.'\n            result: FlattenableItems[TargetOnKart] = input[target]\n            return result\n        if isinstance(target, Iterable):\n            return [self._get_input_targets(t) for t in target]\n        if isinstance(target, 
TaskOnKart):\n            requires_unique_ids = [task.make_unique_id() for task in flatten(self.requires())]\n            assert target.make_unique_id() in requires_unique_ids, f'{target} should be in requires method'\n            return target.output()\n        return target\n\n    def _get_output_target(self, target: None | str | TargetOnKart) -> TargetOnKart:\n        if target is None:\n            output = self.output()\n            assert isinstance(output, TargetOnKart), f'output must be TargetOnKart, but {type(output)} is passed.'\n            return output\n        if isinstance(target, str):\n            output = self.output()\n            assert isinstance(output, dict), f'output must be dict[str, TargetOnKart], but {type(output)} is passed.'\n            result = output[target]\n            assert isinstance(result, TargetOnKart), f'output must be dict[str, TargetOnKart], but {type(output)} is passed.'\n            return result\n        return target\n\n    def get_info(self, only_significant=False):\n        params_str = {}\n        params = dict(self.get_params())\n        for param_name, param_value in self.param_kwargs.items():\n            if (not only_significant) or params[param_name].significant:\n                if isinstance(params[param_name], gokart.TaskInstanceParameter):\n                    params_str[param_name] = type(param_value).__name__ + '-' + param_value.make_unique_id()\n                else:\n                    params_str[param_name] = params[param_name].serialize(param_value)\n        return params_str\n\n    def _get_task_log_target(self):\n        return self.make_target(f'log/task_log/{type(self).__name__}.pkl')\n\n    def get_task_log(self) -> dict[str, Any]:\n        target = self._get_task_log_target()\n        if self.task_log:\n            return self.task_log\n        if target.exists():\n            return cast(dict[Any, Any], self.load(target))\n        return dict()\n\n    
@luigi.Task.event_handler(luigi.Event.SUCCESS)\n    def _dump_task_log(self):\n        self.task_log['file_path'] = [target.path() for target in flatten(self.output())]\n        if self.should_dump_supplementary_log_files:\n            self.dump(self.task_log, self._get_task_log_target())\n\n    def _get_task_params_target(self):\n        return self.make_target(f'log/task_params/{type(self).__name__}.pkl')\n\n    def get_task_params(self) -> dict[str, Any]:\n        target = self._get_task_log_target()\n        if target.exists():\n            return cast(dict[Any, Any], self.load(target))\n        return dict()\n\n    @luigi.Task.event_handler(luigi.Event.START)\n    def _set_random_seed(self):\n        if self.should_dump_supplementary_log_files:\n            random_seed = self._get_random_seed()\n            seed_methods = self.try_set_seed(list(self.fix_random_seed_methods), random_seed)\n            self.dump({'seed': random_seed, 'seed_methods': seed_methods}, self._get_random_seeds_target())\n\n    def _get_random_seeds_target(self):\n        return self.make_target(f'log/random_seed/{type(self).__name__}.pkl')\n\n    @staticmethod\n    def try_set_seed(methods: list[str], random_seed: int) -> list[str]:\n        success_methods: list[str] = []\n        for method_name in methods:\n            try:\n                parts = method_name.split('.')\n                m: Any = import_module(parts[0])\n                for x in parts[1:]:\n                    m = getattr(m, x)\n                m(random_seed)\n                success_methods.append(method_name)\n            except ModuleNotFoundError:\n                pass\n            except AttributeError:\n                pass\n        return success_methods\n\n    def _get_random_seed(self):\n        if self.fix_random_seed_value and (not self.fix_random_seed_value == self.FIX_RANDOM_SEED_VALUE_NONE_MAGIC_NUMBER):\n            return self.fix_random_seed_value\n        return int(self.make_unique_id(), 16) % 
(2**32 - 1)  # maximum numpy.random.seed\n\n    @luigi.Task.event_handler(luigi.Event.START)\n    def _dump_task_params(self):\n        if self.should_dump_supplementary_log_files:\n            self.dump(self.to_str_params(only_significant=True), self._get_task_params_target())\n\n    def _get_processing_time_target(self):\n        return self.make_target(f'log/processing_time/{type(self).__name__}.pkl')\n\n    def get_processing_time(self) -> str:\n        target = self._get_processing_time_target()\n        if target.exists():\n            return cast(str, self.load(target))\n        return 'unknown'\n\n    @luigi.Task.event_handler(luigi.Event.PROCESSING_TIME)\n    def _dump_processing_time(self, processing_time):\n        if self.should_dump_supplementary_log_files:\n            self.dump(processing_time, self._get_processing_time_target())\n\n    @classmethod\n    def restore(cls, unique_id):\n        params = TaskOnKart().make_target(f'log/task_params/{cls.__name__}_{unique_id}.pkl', use_unique_id=False).load()\n        return cls.from_str_params(params)\n\n    @luigi.Task.event_handler(luigi.Event.FAILURE)\n    def _log_unique_id(self, exception):\n        logger.info(f'FAILURE:\\n    task name={type(self).__name__}\\n    unique id={self.make_unique_id()}')\n\n    @luigi.Task.event_handler(luigi.Event.START)\n    def _dump_module_versions(self):\n        if self.should_dump_supplementary_log_files:\n            self.dump(self._get_module_versions(), self._get_module_versions_target())\n\n    def _get_module_versions_target(self):\n        return self.make_target(f'log/module_versions/{type(self).__name__}.txt')\n\n    def _get_module_versions(self) -> str:\n        module_versions = []\n        for x in set([x.split('.')[0] for x in globals().keys() if isinstance(x, types.ModuleType) and '_' not in x]):\n            module = import_module(x)\n            if '__version__' in dir(module):\n                if isinstance(module.__version__, str):\n               
     version = module.__version__.split(' ')[0]\n                else:\n                    version = '.'.join([str(v) for v in module.__version__])\n                module_versions.append(f'{x}=={version}')\n        return '\\n'.join(module_versions)\n\n    def __repr__(self):\n        \"\"\"\n        Build a task representation like\n        `MyTask[aca2f28555dadd0f1e3dee3d4b973651](param1=1.5, param2='5', data_task=DataTask(c1f5d06aa580c5761c55bd83b18b0b4e))`\n        \"\"\"\n        return self._get_task_string()\n\n    def __str__(self):\n        \"\"\"\n        Build a human-readable task representation like\n        `MyTask[aca2f28555dadd0f1e3dee3d4b973651](param1=1.5, param2='5', data_task=DataTask(c1f5d06aa580c5761c55bd83b18b0b4e))`\n        This includes only public parameters\n        \"\"\"\n        return self._get_task_string(only_public=True)\n\n    def _get_task_string(self, only_public=False):\n        \"\"\"\n        Convert a task representation like `MyTask(param1=1.5, param2='5', data_task=DataTask(id=35tyi))`\n        \"\"\"\n        params = self.get_params()\n        param_values = self.get_param_values(params, [], self.param_kwargs)\n\n        # Build up task id\n        repr_parts = []\n        param_objs = dict(params)\n        for param_name, param_value in param_values:\n            param_obj = param_objs[param_name]\n            if param_obj.significant and ((not only_public) or param_obj.visibility == ParameterVisibility.PUBLIC):\n                repr_parts.append(f'{param_name}={self._make_representation(param_obj, param_value)}')\n\n        task_str = f'{self.get_task_family()}[{self.make_unique_id()}]({\", \".join(repr_parts)})'\n        return task_str\n\n    def _make_representation(self, param_obj: luigi.Parameter, param_value: Any) -> str:\n        if isinstance(param_obj, TaskInstanceParameter):\n            return f'{param_value.get_task_family()}({param_value.make_unique_id()})'\n        if isinstance(param_obj, 
ListTaskInstanceParameter):\n            return f'[{\", \".join(f\"{v.get_task_family()}({v.make_unique_id()})\" for v in param_value)}]'\n        return str(param_obj.serialize(param_value))\n"
  },
  {
    "path": "gokart/task_complete_check.py",
    "content": "from __future__ import annotations\n\nimport functools\nfrom collections.abc import Callable\nfrom logging import getLogger\nfrom typing import Any\n\nlogger = getLogger(__name__)\n\n\ndef task_complete_check_wrapper(run_func: Callable[..., Any], complete_check_func: Callable[..., Any]) -> Callable[..., Any]:\n    @functools.wraps(run_func)\n    def wrapper(*args, **kwargs):\n        if complete_check_func():\n            logger.warning(f'{run_func.__name__} is skipped because the task is already completed.')\n            return\n        return run_func(*args, **kwargs)\n\n    return wrapper\n"
  },
  {
    "path": "gokart/testing/__init__.py",
    "content": "__all__ = [\n    'test_run',\n    'try_to_run_test_for_empty_data_frame',\n    'assert_frame_contents_equal',\n]\n\nfrom gokart.testing.check_if_run_with_empty_data_frame import test_run, try_to_run_test_for_empty_data_frame\nfrom gokart.testing.pandas_assert import assert_frame_contents_equal\n"
  },
  {
    "path": "gokart/testing/check_if_run_with_empty_data_frame.py",
    "content": "from __future__ import annotations\n\nimport logging\nimport sys\nfrom typing import Any\n\nimport luigi\nfrom luigi.cmdline_parser import CmdlineParser\n\nimport gokart\nfrom gokart.utils import flatten\n\ntest_logger = logging.getLogger(__name__)\ntest_logger.addHandler(logging.StreamHandler())\ntest_logger.setLevel(logging.INFO)\n\n\nclass test_run(gokart.TaskOnKart[Any]):\n    pandas: luigi.BoolParameter = luigi.BoolParameter()\n    namespace: luigi.OptionalStrParameter = luigi.OptionalStrParameter(\n        default=None, description='When task namespace is not defined explicitly, please use \"__not_user_specified\".'\n    )\n\n\nclass _TestStatus:\n    def __init__(self, task: gokart.TaskOnKart[Any]) -> None:\n        self.namespace = task.task_namespace\n        self.name = type(task).__name__\n        self.task_id = task.make_unique_id()\n        self.status = 'OK'\n        self.message: Exception | None = None\n\n    def format(self) -> str:\n        s = f'status={self.status}; namespace={self.namespace}; name={self.name}; id={self.task_id};'\n        if self.message:\n            s += f' message={type(self.message)}: {\", \".join(map(str, self.message.args))}'\n        return s\n\n    def fail(self) -> bool:\n        return self.status != 'OK'\n\n\ndef _get_all_tasks(task: gokart.TaskOnKart[Any]) -> list[gokart.TaskOnKart[Any]]:\n    result = [task]\n    for o in flatten(task.requires()):\n        result.extend(_get_all_tasks(o))\n    return result\n\n\ndef _run_with_test_status(task: gokart.TaskOnKart[Any]) -> _TestStatus:\n    test_message = _TestStatus(task)\n    try:\n        task.run()\n    except Exception as e:\n        test_message.status = 'NG'\n        test_message.message = e\n    return test_message\n\n\ndef _test_run_with_empty_data_frame(cmdline_args: list[str], test_run_params: test_run) -> None:\n    from unittest.mock import patch\n\n    try:\n        gokart.run(cmdline_args=cmdline_args)\n    except SystemExit as e:\n      
  assert e.code == 0, f'original workflow does not run properly. It exited with error code {e}.'\n\n    with CmdlineParser.global_instance(cmdline_args) as cp:\n        all_tasks = _get_all_tasks(cp.get_task_obj())\n\n    if test_run_params.namespace is not None:\n        all_tasks = [t for t in all_tasks if t.task_namespace == test_run_params.namespace]\n\n    with patch('gokart.TaskOnKart.dump', new=lambda *args, **kwargs: None):\n        test_status_list = [_run_with_test_status(t) for t in all_tasks]\n\n    test_logger.info('gokart test results:\\n' + '\\n'.join(s.format() for s in test_status_list))\n    if any(s.fail() for s in test_status_list):\n        sys.exit(1)\n\n\ndef try_to_run_test_for_empty_data_frame(cmdline_args: list[str]) -> None:\n    with CmdlineParser.global_instance(cmdline_args):\n        test_run_params = test_run()\n\n    if test_run_params.pandas:\n        cmdline_args = [a for a in cmdline_args if not a.startswith('--test-run-')]\n        _test_run_with_empty_data_frame(cmdline_args=cmdline_args, test_run_params=test_run_params)\n        sys.exit(0)\n"
  },
  {
    "path": "gokart/testing/pandas_assert.py",
    "content": "from __future__ import annotations\n\nfrom typing import Any\n\nimport pandas as pd\n\n\ndef assert_frame_contents_equal(actual: pd.DataFrame, expected: pd.DataFrame, **kwargs: Any) -> None:\n    \"\"\"\n    Assert that two DataFrames are equal.\n    This function is mostly same as pandas.testing.assert_frame_equal, however\n    - this fuction ignores the order of index and columns.\n    - this function fails when duplicated index or columns are found.\n\n    Parameters\n    ----------\n    - actual, expected: pd.DataFrame\n        DataFrames to be compared.\n    - kwargs: Any\n        Parameters passed to pandas.testing.assert_frame_equal.\n    \"\"\"\n    assert isinstance(actual, pd.DataFrame), 'actual is not a DataFrame'\n    assert isinstance(expected, pd.DataFrame), 'expected is not a DataFrame'\n\n    assert actual.index.is_unique, 'actual index is not unique'\n    assert expected.index.is_unique, 'expected index is not unique'\n    assert actual.columns.is_unique, 'actual columns is not unique'\n    assert expected.columns.is_unique, 'expected columns is not unique'\n\n    assert set(actual.columns) == set(expected.columns), 'columns are not equal'\n    assert set(actual.index) == set(expected.index), 'indexes are not equal'\n\n    expected_reindexed = expected.reindex(actual.index)[actual.columns]\n    pd.testing.assert_frame_equal(actual, expected_reindexed, **kwargs)\n"
  },
  {
    "path": "gokart/tree/task_info.py",
    "content": "from __future__ import annotations\n\nimport os\nfrom typing import Any\n\nimport pandas as pd\n\nfrom gokart.target import make_target\nfrom gokart.task import TaskOnKart\nfrom gokart.tree.task_info_formatter import make_task_info_tree, make_tree_info, make_tree_info_table_list\n\n\ndef make_task_info_as_tree_str(task: TaskOnKart[Any], details: bool = False, abbr: bool = True, ignore_task_names: list[str] | None = None) -> str:\n    \"\"\"\n    Return a string representation of the tasks, their statuses/parameters in a dependency tree format\n\n    Parameters\n    ----------\n    - task: TaskOnKart\n        Root task.\n    - details: bool\n        Whether or not to output details.\n    - abbr: bool\n        Whether or not to simplify tasks information that has already appeared.\n    - ignore_task_names: list[str] | None\n        List of task names to ignore.\n    Returns\n    -------\n    - tree_info : str\n        Formatted task dependency tree.\n    \"\"\"\n    task_info = make_task_info_tree(task, ignore_task_names=ignore_task_names)\n    result = make_tree_info(task_info=task_info, indent='', last=True, details=details, abbr=abbr, visited_tasks=set())\n    return result\n\n\ndef make_task_info_as_table(task: TaskOnKart[Any], ignore_task_names: list[str] | None = None) -> pd.DataFrame:\n    \"\"\"Return a table containing information about dependent tasks.\n\n    Parameters\n    ----------\n    - task: TaskOnKart\n        Root task.\n    - ignore_task_names: list[str] | None\n        List of task names to ignore.\n    Returns\n    -------\n    - task_info_table : pandas.DataFrame\n        Formatted task dependency table.\n    \"\"\"\n\n    task_info = make_task_info_tree(task, ignore_task_names=ignore_task_names)\n    task_info_table = pd.DataFrame(make_tree_info_table_list(task_info=task_info, visited_tasks=set()))\n\n    return task_info_table\n\n\ndef dump_task_info_table(task: TaskOnKart[Any], task_info_dump_path: str, ignore_task_names: 
list[str] | None = None) -> None:\n    \"\"\"Dump a table containing information about dependent tasks.\n\n    Parameters\n    ----------\n    - task: TaskOnKart\n        Root task.\n    - task_info_dump_path: str\n        Output target file path. Path destination can be `local`, `S3`, or `GCS`.\n        File extension can be any type that gokart file processor accepts, including `csv`, `pickle`, or `txt`.\n        See `TaskOnKart.make_target module <https://gokart.readthedocs.io/en/latest/task_on_kart.html#taskonkart-make-target>` for details.\n    - ignore_task_names: list[str] | None\n        List of task names to ignore.\n    Returns\n    -------\n    None\n    \"\"\"\n    task_info_table = make_task_info_as_table(task=task, ignore_task_names=ignore_task_names)\n\n    unique_id = task.make_unique_id()\n\n    task_info_target = make_target(file_path=task_info_dump_path, unique_id=unique_id)\n    task_info_target.dump(obj=task_info_table, lock_at_dump=False)\n\n\ndef dump_task_info_tree(task: TaskOnKart[Any], task_info_dump_path: str, ignore_task_names: list[str] | None = None, use_unique_id: bool = True) -> None:\n    \"\"\"Dump the task info tree object (TaskInfo) to a pickle file.\n\n    Parameters\n    ----------\n    - task: TaskOnKart\n        Root task.\n    - task_info_dump_path: str\n        Output target file path. Path destination can be `local`, `S3`, or `GCS`.\n        File extension must be '.pkl'.\n    - ignore_task_names: list[str] | None\n        List of task names to ignore.\n    - use_unique_id: bool = True\n        Whether to use unique id to dump target file. 
Default is True.\n    Returns\n    -------\n    None\n    \"\"\"\n    extension = os.path.splitext(task_info_dump_path)[1]\n    assert extension == '.pkl', f'File extention must be `.pkl`, not `{extension}`.'\n\n    task_info_tree = make_task_info_tree(task, ignore_task_names=ignore_task_names)\n\n    unique_id = task.make_unique_id() if use_unique_id else None\n\n    task_info_target = make_target(file_path=task_info_dump_path, unique_id=unique_id)\n    task_info_target.dump(obj=task_info_tree, lock_at_dump=False)\n"
  },
  {
    "path": "gokart/tree/task_info_formatter.py",
    "content": "from __future__ import annotations\n\nimport typing\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Any, NamedTuple\n\nfrom gokart.task import TaskOnKart\nfrom gokart.utils import FlattenableItems, flatten\n\n\n@dataclass\nclass TaskInfo:\n    name: str\n    unique_id: str\n    output_paths: list[str]\n    params: dict[str, Any]\n    processing_time: str\n    is_complete: str\n    task_log: dict[str, Any]\n    requires: FlattenableItems[RequiredTask]\n    children_task_infos: list[TaskInfo]\n\n    def get_task_id(self):\n        return f'{self.name}_{self.unique_id}'\n\n    def get_task_title(self):\n        return f'({self.is_complete}) {self.name}[{self.unique_id}]'\n\n    def get_task_detail(self):\n        return f'(parameter={self.params}, output={self.output_paths}, time={self.processing_time}, task_log={self.task_log})'\n\n    def task_info_dict(self):\n        return dict(\n            name=self.name,\n            unique_id=self.unique_id,\n            output_paths=self.output_paths,\n            params=self.params,\n            processing_time=self.processing_time,\n            is_complete=self.is_complete,\n            task_log=self.task_log,\n            requires=self.requires,\n        )\n\n\nclass RequiredTask(NamedTuple):\n    name: str\n    unique_id: str\n\n\ndef _make_requires_info(requires):\n    if isinstance(requires, TaskOnKart):\n        return RequiredTask(name=requires.__class__.__name__, unique_id=requires.make_unique_id())\n    elif isinstance(requires, dict):\n        return {key: _make_requires_info(requires=item) for key, item in requires.items()}\n    elif isinstance(requires, typing.Iterable):\n        return [_make_requires_info(requires=item) for item in requires]\n\n    raise TypeError(f'`requires` has unexpected type {type(requires)}. 
Must be `TaskOnKart`, `Iterarble[TaskOnKart]`, or `Dict[str, TaskOnKart]`')\n\n\ndef make_task_info_tree(task: TaskOnKart[Any], ignore_task_names: list[str] | None = None, cache: dict[str, TaskInfo] | None = None) -> TaskInfo:\n    with warnings.catch_warnings():\n        warnings.filterwarnings(action='ignore', message='Task .* without outputs has no custom complete() method')\n        is_task_complete = task.complete()\n\n    name = task.__class__.__name__\n    unique_id = task.make_unique_id()\n    output_paths: list[str] = [t.path() for t in flatten(task.output())]\n\n    cache = {} if cache is None else cache\n    cache_id = f'{name}_{unique_id}_{is_task_complete}'\n    if cache_id in cache:\n        return cache[cache_id]\n\n    params = task.get_info(only_significant=True)\n    processing_time = task.get_processing_time()\n    if isinstance(processing_time, float):\n        processing_time = str(processing_time) + 's'\n    is_complete = 'COMPLETE' if is_task_complete else 'PENDING'\n    task_log = dict(task.get_task_log())\n    requires = _make_requires_info(task.requires())\n\n    children = flatten(task.requires())\n    children_task_infos: list[TaskInfo] = []\n    for child in children:\n        if ignore_task_names is None or child.__class__.__name__ not in ignore_task_names:\n            children_task_infos.append(make_task_info_tree(child, ignore_task_names=ignore_task_names, cache=cache))\n    task_info = TaskInfo(\n        name=name,\n        unique_id=unique_id,\n        output_paths=output_paths,\n        params=params,\n        processing_time=processing_time,\n        is_complete=is_complete,\n        task_log=task_log,\n        requires=requires,\n        children_task_infos=children_task_infos,\n    )\n    cache[cache_id] = task_info\n    return task_info\n\n\ndef make_tree_info(task_info: TaskInfo, indent: str, last: bool, details: bool, abbr: bool, visited_tasks: set[str]) -> str:\n    result = '\\n' + indent\n    if last:\n        result += 
'└─-'\n        indent += '   '\n    else:\n        result += '|--'\n        indent += '|  '\n    result += task_info.get_task_title()\n\n    if abbr:\n        task_id = task_info.get_task_id()\n        if task_id not in visited_tasks:\n            visited_tasks.add(task_id)\n        else:\n            result += f'\\n{indent}└─- ...'\n            return result\n\n    if details:\n        result += task_info.get_task_detail()\n\n    children = task_info.children_task_infos\n    for index, child in enumerate(children):\n        result += make_tree_info(child, indent, (index + 1) == len(children), details=details, abbr=abbr, visited_tasks=visited_tasks)\n    return result\n\n\ndef make_tree_info_table_list(task_info: TaskInfo, visited_tasks: set[str]) -> list[dict[str, typing.Any]]:\n    task_id = task_info.get_task_id()\n    if task_id in visited_tasks:\n        return []\n    visited_tasks.add(task_id)\n\n    result = [task_info.task_info_dict()]\n\n    children = task_info.children_task_infos\n    for child in children:\n        result += make_tree_info_table_list(task_info=child, visited_tasks=visited_tasks)\n    return result\n"
  },
  {
    "path": "gokart/utils.py",
    "content": "from __future__ import annotations\n\nimport os\nfrom collections.abc import Callable, Iterable\nfrom io import BytesIO\nfrom typing import Any, Literal, Protocol, TypeAlias, TypeVar, get_args, get_origin\n\nimport dill\nimport luigi\nimport pandas as pd\n\n\nclass FileLike(Protocol):\n    def read(self, n: int) -> bytes: ...\n\n    def readline(self) -> bytes: ...\n\n    def seek(self, offset: int) -> None: ...\n\n    def seekable(self) -> bool: ...\n\n\ndef add_config(file_path: str) -> None:\n    _, ext = os.path.splitext(file_path)\n    luigi.configuration.core.parser = ext  # type: ignore\n    assert luigi.configuration.add_config_path(file_path)\n\n\nT = TypeVar('T')\nFlattenableItems: TypeAlias = T | Iterable['FlattenableItems[T]'] | dict[str, 'FlattenableItems[T]']\n\n\ndef flatten(targets: FlattenableItems[T]) -> list[T]:\n    \"\"\"\n    Creates a flat list of all items in structured output (dicts, lists, items):\n\n    .. code-block:: python\n\n        >>> sorted(flatten({'a': 'foo', 'b': 'bar'}))\n        ['bar', 'foo']\n        >>> sorted(flatten(['foo', ['bar', 'troll']]))\n        ['bar', 'foo', 'troll']\n        >>> flatten('foo')\n        ['foo']\n        >>> flatten(42)\n        [42]\n\n    This method is copied and modified from [luigi.task.flatten](https://github.com/spotify/luigi/blob/367edc2e3a099b8a0c2d15b1676269e33ad06117/luigi/task.py#L958) in accordance with [Apache License 2.0](https://github.com/spotify/luigi/blob/367edc2e3a099b8a0c2d15b1676269e33ad06117/LICENSE).\n    \"\"\"\n    if targets is None:\n        return []\n    flat = []\n    if isinstance(targets, dict):\n        for _, result in targets.items():\n            flat += flatten(result)\n        return flat\n\n    if isinstance(targets, str):\n        return [targets]  # type: ignore\n\n    if not isinstance(targets, Iterable):\n        return [targets]\n\n    for result in targets:\n        flat += flatten(result)\n    return flat\n\n\nK = TypeVar('K')\n\n\ndef 
map_flattenable_items(func: Callable[[T], K], items: FlattenableItems[T]) -> FlattenableItems[K]:\n    if isinstance(items, dict):\n        return {k: map_flattenable_items(func, v) for k, v in items.items()}\n    if isinstance(items, tuple):\n        return tuple(map_flattenable_items(func, i) for i in items)\n    if isinstance(items, str):\n        return func(items)  # type: ignore\n    if isinstance(items, Iterable):\n        return list(map(lambda item: map_flattenable_items(func, item), items))\n    return func(items)\n\n\ndef load_dill_with_pandas_backward_compatibility(file: FileLike | BytesIO) -> Any:\n    \"\"\"Load binary dumped by dill with pandas backward compatibility.\n    pd.read_pickle can load binary dumped in backward pandas version, and also any objects dumped by pickle.\n    It is unclear whether all objects dumped by dill can be loaded by pd.read_pickle, we use dill.load as a fallback.\n    \"\"\"\n    try:\n        return dill.load(file)\n    except Exception:\n        assert file.seekable(), f'{file} is not seekable.'\n        file.seek(0)\n        return pd.read_pickle(file)\n\n\ndef get_dataframe_type_from_task(task: Any) -> Literal['pandas', 'polars', 'polars-lazy']:\n    \"\"\"\n    Extract DataFrame type from TaskOnKart[T] type parameter.\n\n    Examines the type parameter T of a TaskOnKart subclass to determine\n    whether it uses pandas or polars DataFrames/LazyFrames.\n\n    Args:\n        task: A TaskOnKart instance or class\n\n    Returns:\n        'pandas', 'polars', or 'polars-lazy' (defaults to 'pandas' if type cannot be determined)\n\n    Examples:\n        >>> class MyTask(TaskOnKart[pd.DataFrame]): pass\n        >>> get_dataframe_type_from_task(MyTask())\n        'pandas'\n\n        >>> class MyPolarsTask(TaskOnKart[pl.DataFrame]): pass\n        >>> get_dataframe_type_from_task(MyPolarsTask())\n        'polars'\n    \"\"\"\n    task_class = task if isinstance(task, type) else task.__class__\n\n    # Walk the MRO to find 
TaskOnKart[...] even when defined on a parent class\n    mro = task_class.mro() if hasattr(task_class, 'mro') else [task_class]\n\n    for cls in mro:\n        for base in getattr(cls, '__orig_bases__', ()):\n            origin = get_origin(base)\n            if origin and hasattr(origin, '__name__') and origin.__name__ == 'TaskOnKart':\n                args = get_args(base)\n                if not args:\n                    continue\n                df_type = args[0]\n                module = getattr(df_type, '__module__', '')\n\n                # Check module name to determine DataFrame type\n                if 'polars' in module:\n                    name = getattr(df_type, '__name__', '')\n                    if name == 'LazyFrame':\n                        return 'polars-lazy'\n                    return 'polars'\n                elif 'pandas' in module:\n                    return 'pandas'\n\n    return 'pandas'  # Default to pandas for backward compatibility\n"
  },
  {
    "path": "gokart/worker.py",
    "content": "#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nThe worker communicates with the scheduler and does two things:\n\n1. Sends all tasks that has to be run\n2. Gets tasks from the scheduler that should be run\n\nWhen running in local mode, the worker talks directly to a :py:class:`~luigi.scheduler.Scheduler` instance.\nWhen you run a central server, the worker will talk to the scheduler using a :py:class:`~luigi.rpc.RemoteScheduler` instance.\n\nEverything in this module is private to luigi and may change in incompatible\nways between versions. 
The exception is the exception types and the\n:py:class:`worker` config class.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport collections\nimport collections.abc\nimport contextlib\nimport datetime\nimport functools\nimport getpass\nimport importlib\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport queue as Queue\nimport random\nimport signal\nimport socket\nimport subprocess\nimport sys\nimport threading\nimport time\nimport traceback\nfrom collections.abc import Generator\nfrom typing import Any, Literal, cast\n\nimport luigi\nimport luigi.scheduler\nimport luigi.worker\nfrom luigi import notifications\nfrom luigi.event import Event\nfrom luigi.scheduler import DISABLED, DONE, FAILED, PENDING, UNKNOWN, WORKER_STATE_ACTIVE, WORKER_STATE_DISABLED, RetryPolicy, Scheduler\nfrom luigi.target import Target\nfrom luigi.task import DynamicRequirements, Task, flatten\nfrom luigi.task_register import TaskClassException, load_task\nfrom luigi.task_status import RUNNING\n\nfrom gokart.parameter import ExplicitBoolParameter\n\nlogger = logging.getLogger(__name__)\n\n# Use fork context instead of the default (spawn on macOS), which ensures compatibility with gokart's multiprocessing requirements.\n_fork_context = multiprocessing.get_context('fork')\n_ForkProcess = _fork_context.Process\n\n# Prevent fork() from being called during a C-level getaddrinfo() which uses a process-global mutex,\n# that may not be unlocked in child process, resulting in the process being locked indefinitely.\nfork_lock = threading.Lock()\n\n# Why we assert on _WAIT_INTERVAL_EPS:\n# multiprocessing.Queue.get() is undefined for timeout=0 it seems:\n# https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.get.\n# I also tried with really low epsilon, but then ran into the same issue where\n# the test case \"test_external_dependency_worker_is_patient\" got stuck. 
So I\n# unscientifically just set the final value to a floating point number that\n# \"worked for me\".\n_WAIT_INTERVAL_EPS = 0.00001\n\n\ndef _is_external(task: Task) -> bool:\n    return task.run is None or task.run == NotImplemented\n\n\ndef _get_retry_policy_dict(task: Task) -> dict[str, Any]:\n    return RetryPolicy(task.retry_count, task.disable_hard_timeout, task.disable_window)._asdict()  # type: ignore\n\n\nGetWorkResponse = collections.namedtuple(\n    'GetWorkResponse',\n    (\n        'task_id',\n        'running_tasks',\n        'n_pending_tasks',\n        'n_unique_pending',\n        'n_pending_last_scheduled',\n        'worker_state',\n    ),\n)\n\n\nclass TaskProcess(_ForkProcess):  # type: ignore[valid-type, misc]\n    \"\"\"Wrap all task execution in this class.\n\n    Mainly for convenience since this is run in a separate process.\"\"\"\n\n    # mapping of status_reporter attributes to task attributes that are added to tasks\n    # before they actually run, and removed afterwards\n    forward_reporter_attributes = {\n        'update_tracking_url': 'set_tracking_url',\n        'update_status_message': 'set_status_message',\n        'update_progress_percentage': 'set_progress_percentage',\n        'decrease_running_resources': 'decrease_running_resources',\n        'scheduler_messages': 'scheduler_messages',\n    }\n\n    def __init__(\n        self,\n        task: luigi.Task,\n        worker_id: str,\n        result_queue: multiprocessing.Queue[Any],\n        status_reporter: luigi.worker.TaskStatusReporter,\n        use_multiprocessing: bool = False,\n        worker_timeout: int = 0,\n        check_unfulfilled_deps: bool = True,\n        check_complete_on_run: bool = False,\n        task_completion_cache: dict[str, Any] | None = None,\n        task_completion_check_at_run: bool = True,\n    ) -> None:\n        super().__init__()\n        self.task = task\n        self.worker_id = worker_id\n        self.result_queue = result_queue\n        
self.status_reporter = status_reporter\n        self.worker_timeout = task.worker_timeout if task.worker_timeout is not None else worker_timeout\n        self.timeout_time = time.time() + self.worker_timeout if self.worker_timeout else None\n        self.use_multiprocessing = use_multiprocessing or self.timeout_time is not None\n        self.check_unfulfilled_deps = check_unfulfilled_deps\n        self.check_complete_on_run = check_complete_on_run\n        self.task_completion_cache = task_completion_cache\n        self.task_completion_check_at_run = task_completion_check_at_run\n\n        # completeness check using the cache\n        self.check_complete = functools.partial(luigi.worker.check_complete_cached, completion_cache=task_completion_cache)\n\n    def _run_task(self) -> collections.abc.Generator[Any, Any, Any] | None:\n        if self.task_completion_check_at_run and self.check_complete(self.task):\n            logger.warning(f'{self.task} is skipped because the task is already completed.')\n            return None\n        return cast(collections.abc.Generator[Any, Any, Any] | None, self.task.run())\n\n    def _run_get_new_deps(self) -> list[tuple[str, str, dict[str, str]]] | None:\n        task_gen = self._run_task()\n\n        if not isinstance(task_gen, collections.abc.Generator):\n            return None\n\n        next_send = None\n        while True:\n            try:\n                if next_send is None:\n                    requires = next(task_gen)\n                else:\n                    requires = task_gen.send(next_send)\n            except StopIteration:\n                return None\n\n            # if requires is not a DynamicRequirements, create one to use its default behavior\n            if not isinstance(requires, DynamicRequirements):\n                requires = DynamicRequirements(requires)\n\n            if not requires.complete(self.check_complete):\n                # not all requirements are complete, return them which adds them 
to the tree\n                new_deps = [(t.task_module, t.task_family, t.to_str_params()) for t in requires.flat_requirements]\n                return new_deps\n\n            # get the next generator result\n            next_send = requires.paths\n\n    def run(self) -> None:\n        logger.info('[pid %s] Worker %s running   %s', os.getpid(), self.worker_id, self.task)\n\n        if self.use_multiprocessing:\n            # Need to have different random seeds if running in separate processes\n            processID = os.getpid()\n            currentTime = time.time()\n            random.seed(processID * currentTime)\n\n        status: str | None = FAILED\n        expl = ''\n        missing: list[str] = []\n        new_deps: list[tuple[str, str, dict[str, str]]] | None = []\n        try:\n            # Verify that all the tasks are fulfilled! For external tasks we\n            # don't care about unfulfilled dependencies, because we are just\n            # checking completeness of self.task so outputs of dependencies are\n            # irrelevant.\n            if self.check_unfulfilled_deps and not _is_external(self.task):\n                missing = []\n                for dep in self.task.deps():\n                    if not self.check_complete(dep):\n                        nonexistent_outputs = [output for output in flatten(dep.output()) if not output.exists()]\n                        if nonexistent_outputs:\n                            missing.append(f'{dep.task_id} ({\", \".join(map(str, nonexistent_outputs))})')\n                        else:\n                            missing.append(dep.task_id)\n                if missing:\n                    deps = 'dependency' if len(missing) == 1 else 'dependencies'\n                    raise RuntimeError('Unfulfilled {} at run time: {}'.format(deps, ', '.join(missing)))\n            self.task.trigger_event(Event.START, self.task)\n            t0 = time.time()\n            status = None\n\n            if 
_is_external(self.task):\n                # External task\n                if self.check_complete(self.task):\n                    status = DONE\n                else:\n                    status = FAILED\n                    expl = 'Task is an external data dependency and data does not exist (yet?).'\n            else:\n                with self._forward_attributes():\n                    new_deps = self._run_get_new_deps()\n                if not new_deps:\n                    if not self.check_complete_on_run:\n                        # update the cache\n                        if self.task_completion_cache is not None:\n                            self.task_completion_cache[self.task.task_id] = True\n                        status = DONE\n                    elif self.check_complete(self.task):\n                        status = DONE\n                    else:\n                        raise luigi.worker.TaskException('Task finished running, but complete() is still returning false.')\n                else:\n                    status = PENDING\n\n            if new_deps:\n                logger.info('[pid %s] Worker %s new requirements      %s', os.getpid(), self.worker_id, self.task)\n            elif status == DONE:\n                self.task.trigger_event(Event.PROCESSING_TIME, self.task, time.time() - t0)\n                expl = self.task.on_success()\n                logger.info('[pid %s] Worker %s done      %s', os.getpid(), self.worker_id, self.task)\n                self.task.trigger_event(Event.SUCCESS, self.task)\n\n        except KeyboardInterrupt:\n            raise\n        except BaseException as ex:\n            status = FAILED\n            expl = self._handle_run_exception(ex)\n\n        finally:\n            self.result_queue.put((self.task.task_id, status, expl, missing, new_deps))\n\n    def _handle_run_exception(self, ex: BaseException) -> str:\n        logger.exception('[pid %s] Worker %s failed    %s', os.getpid(), self.worker_id, 
self.task)\n        self.task.trigger_event(Event.FAILURE, self.task, ex)\n        return cast(str, self.task.on_failure(ex))\n\n    def _recursive_terminate(self) -> None:\n        import psutil\n\n        try:\n            parent = psutil.Process(self.pid)\n            children = parent.children(recursive=True)\n\n            # terminate parent. Give it a chance to clean up\n            super().terminate()\n            parent.wait()\n\n            # terminate children\n            for child in children:\n                try:\n                    child.terminate()\n                except psutil.NoSuchProcess:\n                    continue\n        except psutil.NoSuchProcess:\n            return\n\n    def terminate(self) -> None:\n        \"\"\"Terminate this process and its subprocesses.\"\"\"\n        # default terminate() doesn't cleanup child processes, it orphans them.\n        try:\n            return self._recursive_terminate()\n        except ImportError:\n            super().terminate()\n\n    @contextlib.contextmanager\n    def _forward_attributes(self):\n        # forward configured attributes to the task\n        for reporter_attr, task_attr in self.forward_reporter_attributes.items():\n            setattr(self.task, task_attr, getattr(self.status_reporter, reporter_attr))\n        try:\n            yield self\n        finally:\n            # reset attributes again\n            for _, task_attr in self.forward_reporter_attributes.items():\n                setattr(self.task, task_attr, None)\n\n\n# This code and the task_process_context config key currently feels a bit ad-hoc.\n# Discussion on generalizing it into a plugin system: https://github.com/spotify/luigi/issues/1897\nclass ContextManagedTaskProcess(TaskProcess):\n    def __init__(self, context: Any, *args: Any, **kwargs: Any) -> None:\n        super().__init__(*args, **kwargs)\n        self.context = context\n\n    def run(self) -> None:\n        if self.context:\n            
logger.debug('Importing module and instantiating ' + self.context)\n            module_path, class_name = self.context.rsplit('.', 1)\n            module = importlib.import_module(module_path)\n            cls = getattr(module, class_name)\n\n            with cls(self):\n                super().run()\n        else:\n            super().run()\n\n\nclass gokart_worker(luigi.Config):\n    \"\"\"Configuration for the gokart worker.\n\n    You can set these options of section [gokart_worker] in your luigi.cfg file.\n\n    NOTE: use snake_case for this class to match the luigi.Config convention.\n    \"\"\"\n\n    id: luigi.StrParameter = luigi.StrParameter(default='', description='Override the auto-generated worker_id')\n    ping_interval: luigi.FloatParameter = luigi.FloatParameter(\n        default=1.0,\n        config_path=dict(section='core', name='worker-ping-interval'),  # type: ignore  # fix https://github.com/spotify/luigi/pull/3403\n    )\n    keep_alive: luigi.BoolParameter = luigi.BoolParameter(\n        default=False,\n        config_path=dict(section='core', name='worker-keep-alive'),  # type: ignore  # fix https://github.com/spotify/luigi/pull/3403\n    )\n    count_uniques: luigi.BoolParameter = luigi.BoolParameter(\n        default=False,\n        config_path=dict(section='core', name='worker-count-uniques'),  # type: ignore  # fix https://github.com/spotify/luigi/pull/3403\n        description='worker-count-uniques means that we will keep a worker alive only if it has a unique pending task, as well as having keep-alive true',\n    )\n    count_last_scheduled: luigi.BoolParameter = luigi.BoolParameter(\n        default=False, description='Keep a worker alive only if there are pending tasks which it was the last to schedule.'\n    )\n    wait_interval: luigi.FloatParameter = luigi.FloatParameter(\n        default=1.0,\n        config_path=dict(section='core', name='worker-wait-interval'),  # type: ignore  # fix https://github.com/spotify/luigi/pull/3403\n 
   )\n    wait_jitter: luigi.FloatParameter = luigi.FloatParameter(default=5.0)\n\n    max_keep_alive_idle_duration: luigi.TimeDeltaParameter = luigi.TimeDeltaParameter(default=datetime.timedelta(0))\n\n    max_reschedules: luigi.IntParameter = luigi.IntParameter(\n        default=1,\n        config_path=dict(section='core', name='worker-max-reschedules'),  # type: ignore  # fix https://github.com/spotify/luigi/pull/3403\n    )\n    timeout: luigi.IntParameter = luigi.IntParameter(\n        default=0,\n        config_path=dict(section='core', name='worker-timeout'),  # type: ignore  # fix https://github.com/spotify/luigi/pull/3403\n    )\n    task_limit: luigi.OptionalIntParameter = luigi.OptionalIntParameter(\n        default=None,  # type: ignore[arg-type]  # OptionalIntParameter.__init__ inherits IntParameter's signature\n        config_path=dict(section='core', name='worker-task-limit'),  # type: ignore  # fix https://github.com/spotify/luigi/pull/3403\n    )\n    retry_external_tasks: luigi.BoolParameter = luigi.BoolParameter(\n        default=False,\n        config_path=dict(section='core', name='retry-external-tasks'),  # type: ignore  # fix https://github.com/spotify/luigi/pull/3403\n        description='If true, incomplete external tasks will be retested for completion while Luigi is running.',\n    )\n    send_failure_email: luigi.BoolParameter = luigi.BoolParameter(default=True, description='If true, send e-mails directly from the workeron failure')\n    no_install_shutdown_handler: luigi.BoolParameter = luigi.BoolParameter(\n        default=False, description='If true, the SIGUSR1 shutdown handler willNOT be install on the worker'\n    )\n    check_unfulfilled_deps: luigi.BoolParameter = luigi.BoolParameter(\n        default=True, description='If true, check for completeness of dependencies before running a task'\n    )\n    check_complete_on_run: luigi.BoolParameter = luigi.BoolParameter(\n        default=False,\n        description='If true, only mark 
tasks as done after running if they are complete. '\n        'Regardless of this setting, the worker will always check if external '\n        'tasks are complete before marking them as done.',\n    )\n    force_multiprocessing: luigi.BoolParameter = luigi.BoolParameter(default=False, description='If true, use multiprocessing also when running with 1 worker')\n    task_process_context: luigi.OptionalStrParameter = luigi.OptionalStrParameter(\n        default=None,\n        description='If set to a fully qualified class name, the class will '\n        'be instantiated with a TaskProcess as its constructor parameter and '\n        'applied as a context manager around its run() call, so this can be '\n        'used for obtaining high level customizable monitoring or logging of '\n        'each individual Task run.',\n    )\n    cache_task_completion: luigi.BoolParameter = luigi.BoolParameter(\n        default=False,\n        description='If true, cache the response of successful completion checks '\n        'of tasks assigned to a worker. 
This can especially speed up tasks with '\n        'dynamic dependencies but assumes that the completion status does not change '\n        'after it was true the first time.',\n    )\n    task_completion_check_at_run: luigi.BoolParameter = ExplicitBoolParameter(\n        default=True, description='If true, tasks completeness will be re-checked just before the run, in case they are finished elsewhere.'\n    )\n\n\nclass Worker:\n    \"\"\"\n    Worker object communicates with a scheduler.\n\n    Simple class that talks to a scheduler and:\n\n    * tells the scheduler what it has to do + its dependencies\n    * asks for stuff to do (pulls it in a loop and runs it)\n    \"\"\"\n\n    def __init__(\n        self,\n        scheduler: Scheduler | None = None,\n        worker_id: str | None = None,\n        worker_processes: int = 1,\n        assistant: bool = False,\n        config: gokart_worker | None = None,\n    ) -> None:\n        if scheduler is None:\n            scheduler = Scheduler()\n\n        self.worker_processes = int(worker_processes)\n        self._worker_info = self._generate_worker_info()\n        if config is None:\n            self._config = gokart_worker()\n        else:\n            self._config = config\n\n        worker_id = worker_id or self._config.id or self._generate_worker_id(self._worker_info)\n\n        assert self._config.wait_interval >= _WAIT_INTERVAL_EPS, '[worker] wait_interval must be positive'\n        assert self._config.wait_jitter >= 0.0, '[worker] wait_jitter must be equal or greater than zero'\n\n        self._id = worker_id\n        self._scheduler = scheduler\n        self._assistant = assistant\n        self._stop_requesting_work = False\n\n        self.host = socket.gethostname()\n        self._scheduled_tasks: dict[str, Task] = {}\n        self._suspended_tasks: dict[str, Task] = {}\n        self._batch_running_tasks: dict[str, Any] = {}\n        self._batch_families_sent: set[str] = set()\n\n        self._first_task = 
None\n\n        self.add_succeeded = True\n        self.run_succeeded = True\n\n        self.unfulfilled_counts: dict[str, int] = collections.defaultdict(int)\n\n        # note that ``signal.signal(signal.SIGUSR1, fn)`` only works inside the main execution thread, which is why we\n        # provide the ability to conditionally install the hook.\n        if not self._config.no_install_shutdown_handler:\n            try:\n                signal.signal(signal.SIGUSR1, self.handle_interrupt)\n                signal.siginterrupt(signal.SIGUSR1, False)\n            except AttributeError:\n                pass\n\n        # Keep info about what tasks are running (could be in other processes)\n        self._task_result_queue: multiprocessing.Queue[Any] = _fork_context.Queue()\n        self._running_tasks: dict[str, TaskProcess] = {}\n        self._idle_since: datetime.datetime | None = None\n\n        # mp-safe dictionary for caching completation checks across task processes\n        self._task_completion_cache = None\n        if self._config.cache_task_completion:\n            self._task_completion_cache = _fork_context.Manager().dict()\n\n        # Stuff for execution_summary\n        self._add_task_history: list[Any] = []\n        self._get_work_response_history: list[Any] = []\n\n    def _add_task(self, *args, **kwargs):\n        \"\"\"\n        Call ``self._scheduler.add_task``, but store the values too so we can\n        implement :py:func:`luigi.execution_summary.summary`.\n        \"\"\"\n        task_id = kwargs['task_id']\n        status = kwargs['status']\n        runnable = kwargs['runnable']\n        task = self._scheduled_tasks.get(task_id)\n        if task:\n            self._add_task_history.append((task, status, runnable))\n            kwargs['owners'] = task._owner_list()\n\n        if task_id in self._batch_running_tasks:\n            for batch_task in self._batch_running_tasks.pop(task_id):\n                self._add_task_history.append((batch_task, 
status, True))\n\n        if task and kwargs.get('params'):\n            kwargs['param_visibilities'] = task._get_param_visibilities()\n\n        self._scheduler.add_task(*args, **kwargs)\n\n        logger.info('Informed scheduler that task   %s   has status   %s', task_id, status)\n\n    def __enter__(self) -> Worker:\n        \"\"\"\n        Start the KeepAliveThread.\n        \"\"\"\n        self._keep_alive_thread = luigi.worker.KeepAliveThread(self._scheduler, self._id, self._config.ping_interval, self._handle_rpc_message)\n        self._keep_alive_thread.daemon = True\n        self._keep_alive_thread.start()\n        return self\n\n    def __exit__(self, type: Any, value: Any, traceback: Any) -> Literal[False]:\n        \"\"\"\n        Stop the KeepAliveThread and kill still running tasks.\n        \"\"\"\n        self._keep_alive_thread.stop()\n        self._keep_alive_thread.join()\n        for task in self._running_tasks.values():\n            if task.is_alive():\n                task.terminate()\n        self._task_result_queue.close()\n        return False  # Don't suppress exception\n\n    def _generate_worker_info(self) -> list[tuple[str, Any]]:\n        # Generate as much info as possible about the worker\n        # Some of these calls might not be available on all OS's\n        args = [('salt', f'{random.randrange(0, 10_000_000_000):09d}'), ('workers', self.worker_processes)]\n        try:\n            args += [('host', socket.gethostname())]\n        except BaseException:\n            pass\n        try:\n            args += [('username', getpass.getuser())]\n        except BaseException:\n            pass\n        try:\n            args += [('pid', os.getpid())]\n        except BaseException:\n            pass\n        try:\n            sudo_user = os.getenv('SUDO_USER')\n            if sudo_user:\n                args.append(('sudo_user', sudo_user))\n        except BaseException:\n            pass\n        return args\n\n    def 
_generate_worker_id(self, worker_info: list[Any]) -> str:\n        worker_info_str = ', '.join([f'{k}={v}' for k, v in worker_info])\n        return f'Worker({worker_info_str})'\n\n    def _validate_task(self, task: Task) -> None:\n        if not isinstance(task, Task):\n            raise luigi.worker.TaskException(f'Can not schedule non-task {task}')\n\n        if not task.initialized():\n            # we can't get the repr of it since it's not initialized...\n            raise luigi.worker.TaskException(\n                f'Task of class {task.__class__.__name__} not initialized. Did you override __init__ and forget to call super(...).__init__?'\n            )\n\n    def _log_complete_error(self, task: Task, tb: str) -> None:\n        log_msg = f'Will not run {task} or any dependencies due to error in complete() method:\\n{tb}'\n        logger.warning(log_msg)\n\n    def _log_dependency_error(self, task: Task, tb: str) -> None:\n        log_msg = f'Will not run {task} or any dependencies due to error in deps() method:\\n{tb}'\n        logger.warning(log_msg)\n\n    def _log_unexpected_error(self, task: Task) -> None:\n        logger.exception('Luigi unexpected framework error while scheduling %s', task)  # needs to be called from within except clause\n\n    def _announce_scheduling_failure(self, task: Task, expl: Any) -> None:\n        try:\n            self._scheduler.announce_scheduling_failure(\n                worker=self._id,\n                task_name=str(task),\n                family=task.task_family,\n                params=task.to_str_params(only_significant=True),\n                expl=expl,\n                owners=task._owner_list(),\n            )\n        except Exception:\n            formatted_traceback = traceback.format_exc()\n            self._email_unexpected_error(task, formatted_traceback)\n            raise\n\n    def _email_complete_error(self, task: Task, formatted_traceback: str) -> None:\n        self._announce_scheduling_failure(task, 
formatted_traceback)\n        if self._config.send_failure_email:\n            self._email_error(\n                task,\n                formatted_traceback,\n                subject='Luigi: {task} failed scheduling. Host: {host}',\n                headline='Will not run {task} or any dependencies due to error in complete() method',\n            )\n\n    def _email_dependency_error(self, task: Task, formatted_traceback: str) -> None:\n        self._announce_scheduling_failure(task, formatted_traceback)\n        if self._config.send_failure_email:\n            self._email_error(\n                task,\n                formatted_traceback,\n                subject='Luigi: {task} failed scheduling. Host: {host}',\n                headline='Will not run {task} or any dependencies due to error in deps() method',\n            )\n\n    def _email_unexpected_error(self, task: Task, formatted_traceback: str) -> None:\n        # this sends even if failure e-mails are disabled, as they may indicate\n        # a more severe failure that may not reach other alerting methods such\n        # as scheduler batch notification\n        self._email_error(\n            task,\n            formatted_traceback,\n            subject='Luigi: Framework error while scheduling {task}. Host: {host}',\n            headline='Luigi framework error',\n        )\n\n    def _email_task_failure(self, task: Task, formatted_traceback: str) -> None:\n        if self._config.send_failure_email:\n            self._email_error(\n                task,\n                formatted_traceback,\n                subject='Luigi: {task} FAILED. Host: {host}',\n                headline='A task failed when running. 
Most likely run() raised an exception.',\n            )\n\n    def _email_error(self, task: Task, formatted_traceback: str, subject: str, headline: str) -> None:\n        formatted_subject = subject.format(task=task, host=self.host)\n        formatted_headline = headline.format(task=task, host=self.host)\n        command = subprocess.list2cmdline(sys.argv)\n        message = notifications.format_task_error(formatted_headline, task, command, formatted_traceback)\n        notifications.send_error_email(formatted_subject, message, task.owner_email)\n\n    def _handle_task_load_error(self, exception: Exception, task_ids: list[str]) -> None:\n        msg = 'Cannot find task(s) sent by scheduler: {}'.format(','.join(task_ids))\n        logger.exception(msg)\n        subject = f'Luigi: {msg}'\n        error_message = notifications.wrap_traceback(exception)\n        for task_id in task_ids:\n            self._add_task(\n                worker=self._id,\n                task_id=task_id,\n                status=FAILED,\n                runnable=False,\n                expl=error_message,\n            )\n        notifications.send_error_email(subject, error_message)\n\n    def add(self, task: Task, multiprocess: bool = False, processes: int = 0) -> bool:\n        \"\"\"\n        Add a Task for the worker to check and possibly schedule and run.\n\n        Returns True if task and its dependencies were successfully scheduled or completed before.\n        \"\"\"\n        if self._first_task is None and hasattr(task, 'task_id'):\n            self._first_task = task.task_id\n        self.add_succeeded = True\n        if multiprocess:\n            queue: Any = _fork_context.Manager().Queue()\n            pool: Any = _fork_context.Pool(processes=processes if processes > 0 else None)\n        else:\n            queue = luigi.worker.DequeQueue()\n            pool = luigi.worker.SingleProcessPool()\n        self._validate_task(task)\n        
pool.apply_async(luigi.worker.check_complete, [task, queue, self._task_completion_cache])\n\n        # we track queue size ourselves because len(queue) won't work for multiprocessing\n        queue_size = 1\n        try:\n            seen = {task.task_id}\n            while queue_size:\n                current = queue.get()\n                queue_size -= 1\n                item, is_complete = current\n                for next in self._add(item, is_complete):\n                    if next.task_id not in seen:\n                        self._validate_task(next)\n                        seen.add(next.task_id)\n                        pool.apply_async(luigi.worker.check_complete, [next, queue, self._task_completion_cache])\n                        queue_size += 1\n        except (KeyboardInterrupt, luigi.worker.TaskException):\n            raise\n        except Exception as ex:\n            self.add_succeeded = False\n            formatted_traceback = traceback.format_exc()\n            self._log_unexpected_error(task)\n            task.trigger_event(Event.BROKEN_TASK, task, ex)\n            self._email_unexpected_error(task, formatted_traceback)\n            raise\n        finally:\n            pool.close()\n            pool.join()\n        return self.add_succeeded\n\n    def _add_task_batcher(self, task: Task) -> None:\n        family = task.task_family\n        if family not in self._batch_families_sent:\n            task_class = type(task)\n            batch_param_names = task_class.batch_param_names()\n            if batch_param_names:\n                self._scheduler.add_task_batcher(\n                    worker=self._id,\n                    task_family=family,\n                    batched_args=batch_param_names,\n                    max_batch_size=task.max_batch_size,\n                )\n            self._batch_families_sent.add(family)\n\n    def _add(self, task: Task, is_complete: bool) -> Generator[Task, None, None]:\n        if self._config.task_limit is not 
None and len(self._scheduled_tasks) >= self._config.task_limit:\n            logger.warning('Will not run %s or any dependencies due to exceeded task-limit of %d', task, self._config.task_limit)\n            deps = None\n            status = UNKNOWN\n            runnable = False\n\n        else:\n            formatted_traceback = None\n            try:\n                self._check_complete_value(is_complete)\n            except KeyboardInterrupt:\n                raise\n            except luigi.worker.AsyncCompletionException as ex:\n                formatted_traceback = ex.trace\n            except BaseException:\n                formatted_traceback = traceback.format_exc()\n\n            if formatted_traceback is not None:\n                self.add_succeeded = False\n                self._log_complete_error(task, formatted_traceback)\n                task.trigger_event(Event.DEPENDENCY_MISSING, task)\n                self._email_complete_error(task, formatted_traceback)\n                deps = None\n                status = UNKNOWN\n                runnable = False\n\n            elif is_complete:\n                deps = None\n                status = DONE\n                runnable = False\n                task.trigger_event(Event.DEPENDENCY_PRESENT, task)\n\n            elif _is_external(task):\n                deps = None\n                status = PENDING\n                runnable = self._config.retry_external_tasks\n                task.trigger_event(Event.DEPENDENCY_MISSING, task)\n                logger.warning('Data for %s does not exist (yet?). 
The task is an external data dependency, so it cannot be run from this luigi process.', task)\n\n            else:\n                try:\n                    deps = task.deps()\n                    self._add_task_batcher(task)\n                except Exception as ex:\n                    formatted_traceback = traceback.format_exc()\n                    self.add_succeeded = False\n                    self._log_dependency_error(task, formatted_traceback)\n                    task.trigger_event(Event.BROKEN_TASK, task, ex)\n                    self._email_dependency_error(task, formatted_traceback)\n                    deps = None\n                    status = UNKNOWN\n                    runnable = False\n                else:\n                    status = PENDING\n                    runnable = True\n\n            if task.disabled:\n                status = DISABLED\n\n            if deps:\n                for d in deps:\n                    self._validate_dependency(d)\n                    task.trigger_event(Event.DEPENDENCY_DISCOVERED, task, d)\n                    yield d  # return additional tasks to add\n\n                deps = [d.task_id for d in deps]\n\n        self._scheduled_tasks[task.task_id] = task\n        self._add_task(\n            worker=self._id,\n            task_id=task.task_id,\n            status=status,\n            deps=deps,\n            runnable=runnable,\n            priority=task.priority,\n            resources=task.process_resources(),\n            params=task.to_str_params(),\n            family=task.task_family,\n            module=task.task_module,\n            batchable=task.batchable,\n            retry_policy_dict=_get_retry_policy_dict(task),\n            accepts_messages=task.accepts_messages,\n        )\n\n    def _validate_dependency(self, dependency: Task) -> None:\n        if isinstance(dependency, Target):\n            raise Exception('requires() can not return Target objects. 
Wrap it in an ExternalTask class')\n        elif not isinstance(dependency, Task):\n            raise Exception(f'requires() must return Task objects but {dependency} is a {type(dependency)}')\n\n    def _check_complete_value(self, is_complete: bool | luigi.worker.TracebackWrapper) -> None:\n        if isinstance(is_complete, luigi.worker.TracebackWrapper):\n            raise luigi.worker.AsyncCompletionException(is_complete.trace)\n        if not isinstance(is_complete, bool):\n            raise Exception(f'Return value of Task.complete() must be boolean (was {is_complete!r})')\n\n    def _add_worker(self) -> None:\n        self._worker_info.append(('first_task', self._first_task))\n        self._scheduler.add_worker(self._id, self._worker_info)\n\n    def _log_remote_tasks(self, get_work_response: GetWorkResponse) -> None:\n        logger.debug('Done')\n        logger.debug('There are no more tasks to run at this time')\n        if get_work_response.running_tasks:\n            for r in get_work_response.running_tasks:\n                logger.debug('%s is currently run by worker %s', r['task_id'], r['worker'])\n        elif get_work_response.n_pending_tasks:\n            logger.debug('There are %s pending tasks possibly being run by other workers', get_work_response.n_pending_tasks)\n            if get_work_response.n_unique_pending:\n                logger.debug('There are %i pending tasks unique to this worker', get_work_response.n_unique_pending)\n            if get_work_response.n_pending_last_scheduled:\n                logger.debug('There are %i pending tasks last scheduled by this worker', get_work_response.n_pending_last_scheduled)\n\n    def _get_work_task_id(self, get_work_response: dict[str, Any]) -> str | None:\n        if get_work_response.get('task_id') is not None:\n            return cast(str, get_work_response['task_id'])\n        elif 'batch_id' in get_work_response:\n            try:\n                task = load_task(\n                    
module=get_work_response.get('task_module'),\n                    task_name=get_work_response['task_family'],\n                    params_str=get_work_response['task_params'],\n                )\n            except Exception as ex:\n                self._handle_task_load_error(ex, get_work_response['batch_task_ids'])\n                self.run_succeeded = False\n                return None\n\n            self._scheduler.add_task(\n                worker=self._id,\n                task_id=task.task_id,\n                module=get_work_response.get('task_module'),\n                family=get_work_response['task_family'],\n                params=task.to_str_params(),\n                status=RUNNING,\n                batch_id=get_work_response['batch_id'],\n            )\n            return cast(str, task.task_id)\n        else:\n            return None\n\n    def _get_work(self) -> GetWorkResponse:\n        if self._stop_requesting_work:\n            return GetWorkResponse(None, 0, 0, 0, 0, WORKER_STATE_DISABLED)\n\n        if self.worker_processes > 0:\n            logger.debug('Asking scheduler for work...')\n            r = self._scheduler.get_work(\n                worker=self._id,\n                host=self.host,\n                assistant=self._assistant,\n                current_tasks=list(self._running_tasks.keys()),\n            )\n        else:\n            logger.debug('Checking if tasks are still pending')\n            r = self._scheduler.count_pending(worker=self._id)\n\n        running_tasks = r['running_tasks']\n        task_id = self._get_work_task_id(r)\n\n        self._get_work_response_history.append(\n            {\n                'task_id': task_id,\n                'running_tasks': running_tasks,\n            }\n        )\n\n        if task_id is not None and task_id not in self._scheduled_tasks:\n            logger.info('Did not schedule %s, will load it dynamically', task_id)\n\n            try:\n                # TODO: we should obtain the 
module name from the server!\n                self._scheduled_tasks[task_id] = load_task(module=r.get('task_module'), task_name=r['task_family'], params_str=r['task_params'])\n            except TaskClassException as ex:\n                self._handle_task_load_error(ex, [task_id])\n                task_id = None\n                self.run_succeeded = False\n\n        if task_id is not None and 'batch_task_ids' in r:\n            batch_tasks = filter(None, [self._scheduled_tasks.get(batch_id) for batch_id in r['batch_task_ids']])\n            self._batch_running_tasks[task_id] = batch_tasks\n\n        return GetWorkResponse(\n            task_id=task_id,\n            running_tasks=running_tasks,\n            n_pending_tasks=r['n_pending_tasks'],\n            n_unique_pending=r['n_unique_pending'],\n            # TODO: For a tiny amount of time (a month?) we'll keep forwards compatibility\n            #  That is you can user a newer client than server (Sep 2016)\n            n_pending_last_scheduled=r.get('n_pending_last_scheduled', 0),\n            worker_state=r.get('worker_state', WORKER_STATE_ACTIVE),\n        )\n\n    def _run_task(self, task_id: str) -> None:\n        if task_id in self._running_tasks:\n            logger.debug(f'Got already running task id {task_id} from scheduler, taking a break')\n            next(self._sleeper())\n            return\n\n        task = self._scheduled_tasks[task_id]\n\n        task_process = self._create_task_process(task)\n\n        self._running_tasks[task_id] = task_process\n\n        if task_process.use_multiprocessing:\n            with fork_lock:\n                task_process.start()\n        else:\n            # Run in the same process\n            task_process.run()\n\n    def _create_task_process(self, task):\n        message_queue: Any = _fork_context.Queue() if task.accepts_messages else None\n        reporter = luigi.worker.TaskStatusReporter(self._scheduler, task.task_id, self._id, message_queue)\n        
use_multiprocessing = self._config.force_multiprocessing or bool(self.worker_processes > 1)\n        return ContextManagedTaskProcess(\n            self._config.task_process_context,\n            task,\n            self._id,\n            self._task_result_queue,\n            reporter,\n            use_multiprocessing=use_multiprocessing,\n            worker_timeout=self._config.timeout,\n            check_unfulfilled_deps=self._config.check_unfulfilled_deps,\n            check_complete_on_run=self._config.check_complete_on_run,\n            task_completion_cache=self._task_completion_cache,\n            task_completion_check_at_run=self._config.task_completion_check_at_run,\n        )\n\n    def _purge_children(self) -> None:\n        \"\"\"\n        Find dead children and put a response on the result queue.\n\n        :return:\n        \"\"\"\n        for task_id, p in self._running_tasks.items():\n            if not p.is_alive() and p.exitcode:\n                error_msg = f'Task {task_id} died unexpectedly with exit code {p.exitcode}'\n                p.task.trigger_event(Event.PROCESS_FAILURE, p.task, error_msg)\n            elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive():\n                p.terminate()\n                error_msg = f'Task {task_id} timed out after {p.worker_timeout} seconds and was terminated.'\n                p.task.trigger_event(Event.TIMEOUT, p.task, error_msg)\n            else:\n                continue\n\n            logger.info(error_msg)\n            self._task_result_queue.put((task_id, FAILED, error_msg, [], []))\n\n    def _handle_next_task(self) -> None:\n        \"\"\"\n        We have to catch three ways a task can be \"done\":\n\n        1. normal execution: the task runs/fails and puts a result back on the queue,\n        2. new dependencies: the task yielded new deps that were not complete and\n           will be rescheduled and dependencies added,\n        3. 
child process dies: we need to catch this separately.\n        \"\"\"\n        self._idle_since = None\n        while True:\n            self._purge_children()  # Deal with subprocess failures\n\n            try:\n                task_id, status, expl, missing, new_requirements = self._task_result_queue.get(timeout=self._config.wait_interval)\n            except Queue.Empty:\n                return\n\n            task = self._scheduled_tasks[task_id]\n            if not task or task_id not in self._running_tasks:\n                continue\n                # Not a running task. Probably already removed.\n                # Maybe it yielded something?\n\n            # external task if run not implemented, retry-able if config option is enabled.\n            external_task_retryable = _is_external(task) and self._config.retry_external_tasks\n            if status == FAILED and not external_task_retryable:\n                self._email_task_failure(task, expl)\n\n            new_deps = []\n            if new_requirements:\n                new_req = [load_task(module, name, params) for module, name, params in new_requirements]\n                for t in new_req:\n                    self.add(t)\n                new_deps = [t.task_id for t in new_req]\n\n            self._add_task(\n                worker=self._id,\n                task_id=task_id,\n                status=status,\n                expl=json.dumps(expl),\n                resources=task.process_resources(),\n                runnable=None,\n                params=task.to_str_params(),\n                family=task.task_family,\n                module=task.task_module,\n                new_deps=new_deps,\n                assistant=self._assistant,\n                retry_policy_dict=_get_retry_policy_dict(task),\n            )\n\n            self._running_tasks.pop(task_id)\n\n            # re-add task to reschedule missing dependencies\n            if missing:\n                reschedule = True\n\n                
# keep out of infinite loops by not rescheduling too many times\n                for task_id in missing:\n                    self.unfulfilled_counts[task_id] += 1\n                    if self.unfulfilled_counts[task_id] > self._config.max_reschedules:\n                        reschedule = False\n                if reschedule:\n                    self.add(task)\n\n            self.run_succeeded &= (status == DONE) or (len(new_deps) > 0)\n            return\n\n    def _sleeper(self) -> Generator[None, None, None]:\n        # TODO is exponential backoff necessary?\n        while True:\n            jitter = self._config.wait_jitter\n            wait_interval = self._config.wait_interval + random.uniform(0, jitter)\n            logger.debug('Sleeping for %f seconds', wait_interval)\n            time.sleep(wait_interval)\n            yield\n\n    def _keep_alive(self, get_work_response: Any) -> bool:\n        \"\"\"\n        Returns true if a worker should stay alive given.\n\n        If worker-keep-alive is not set, this will always return false.\n        For an assistant, it will always return the value of worker-keep-alive.\n        Otherwise, it will return true for nonzero n_pending_tasks.\n\n        If worker-count-uniques is true, it will also\n        require that one of the tasks is unique to this worker.\n        \"\"\"\n        if not self._config.keep_alive:\n            return False\n        elif self._assistant:\n            return True\n        elif self._config.count_last_scheduled:\n            return cast(bool, get_work_response.n_pending_last_scheduled > 0)\n        elif self._config.count_uniques:\n            return cast(bool, get_work_response.n_unique_pending > 0)\n        elif get_work_response.n_pending_tasks == 0:\n            return False\n        elif not self._config.max_keep_alive_idle_duration:\n            return True\n        elif not self._idle_since:\n            return True\n        else:\n            time_to_shutdown = 
self._idle_since + self._config.max_keep_alive_idle_duration - datetime.datetime.now()\n            logger.debug('[%s] %s until shutdown', self._id, time_to_shutdown)\n            return time_to_shutdown > datetime.timedelta(0)\n\n    def handle_interrupt(self, signum: int, _: Any) -> None:\n        \"\"\"\n        Stops the assistant from asking for more work on SIGUSR1\n        \"\"\"\n        if signum == signal.SIGUSR1:\n            self._start_phasing_out()\n\n    def _start_phasing_out(self) -> None:\n        \"\"\"\n        Go into a mode where we dont ask for more work and quit once existing\n        tasks are done.\n        \"\"\"\n        self._config.keep_alive = False\n        self._stop_requesting_work = True\n\n    def run(self) -> bool:\n        \"\"\"\n        Returns True if all scheduled tasks were executed successfully.\n        \"\"\"\n        logger.info('Running Worker with %d processes', self.worker_processes)\n\n        sleeper = self._sleeper()\n        self.run_succeeded = True\n\n        self._add_worker()\n\n        while True:\n            while len(self._running_tasks) >= self.worker_processes > 0:\n                logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks))\n                self._handle_next_task()\n\n            get_work_response = self._get_work()\n\n            if get_work_response.worker_state == WORKER_STATE_DISABLED:\n                self._start_phasing_out()\n\n            if get_work_response.task_id is None:\n                if not self._stop_requesting_work:\n                    self._log_remote_tasks(get_work_response)\n                if len(self._running_tasks) == 0:\n                    self._idle_since = self._idle_since or datetime.datetime.now()\n                    if self._keep_alive(get_work_response):\n                        next(sleeper)\n                        continue\n                    else:\n                        break\n                else:\n              
      self._handle_next_task()\n                    continue\n\n            # task_id is not None:\n            logger.debug('Pending tasks: %s', get_work_response.n_pending_tasks)\n            self._run_task(get_work_response.task_id)\n\n        while len(self._running_tasks):\n            logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks))\n            self._handle_next_task()\n\n        return self.run_succeeded\n\n    def _handle_rpc_message(self, message: dict[str, Any]) -> None:\n        logger.info(f'Worker {self._id} got message {message}')\n\n        # the message is a dict {'name': <function_name>, 'kwargs': <function_kwargs>}\n        name = message['name']\n        kwargs = message['kwargs']\n\n        # find the function and check if it's callable and configured to work\n        # as a message callback\n        func = getattr(self, name, None)\n        tpl = (self._id, name)\n        if not callable(func):\n            logger.error(\"Worker {} has no function '{}'\".format(*tpl))\n        elif not getattr(func, 'is_rpc_message_callback', False):\n            logger.error(\"Worker {} function '{}' is not available as rpc message callback\".format(*tpl))\n        else:\n            logger.info(\"Worker {} successfully dispatched rpc message to function '{}'\".format(*tpl))\n            func(**kwargs)\n\n    @luigi.worker.rpc_message_callback\n    def set_worker_processes(self, n: int) -> None:\n        # set the new value\n        self.worker_processes = max(1, n)\n\n        # tell the scheduler\n        self._scheduler.add_worker(self._id, {'workers': self.worker_processes})\n\n    @luigi.worker.rpc_message_callback\n    def dispatch_scheduler_message(self, task_id: str, message_id: str, content: str, **kwargs: Any) -> None:\n        task_id = str(task_id)\n        if task_id in self._running_tasks:\n            task_process = self._running_tasks[task_id]\n            if task_process.status_reporter.scheduler_messages:\n      
          message = luigi.worker.SchedulerMessage(self._scheduler, task_id, message_id, content, **kwargs)\n                task_process.status_reporter.scheduler_messages.put(message)\n"
  },
  {
    "path": "gokart/workspace_management.py",
    "content": "from __future__ import annotations\n\nimport itertools\nimport os\nimport pathlib\nfrom logging import getLogger\nfrom typing import Any\n\nimport gokart\nfrom gokart.utils import flatten\n\nlogger = getLogger(__name__)\n\n\ndef _get_all_output_file_paths(task: gokart.TaskOnKart[Any]) -> list[str]:\n    output_paths = [t.path() for t in flatten(task.output())]\n    children = flatten(task.requires())\n    output_paths.extend(itertools.chain.from_iterable([_get_all_output_file_paths(child) for child in children]))\n    return output_paths\n\n\ndef delete_local_unnecessary_outputs(task: gokart.TaskOnKart[Any]) -> None:\n    task.make_unique_id()  # this is required to make unique ids.\n    all_files = {str(path) for path in pathlib.Path(task.workspace_directory).rglob('*.*')}\n    log_files = {str(path) for path in pathlib.Path(os.path.join(task.workspace_directory, 'log')).rglob('*.*')}\n    necessary_files = set(_get_all_output_file_paths(task))\n    unnecessary_files = all_files - necessary_files - log_files\n    if len(unnecessary_files) == 0:\n        logger.info('all files are necessary for this task.')\n    else:\n        logger.info(f'remove following files: {os.linesep} {os.linesep.join(unnecessary_files)}')\n    for file in unnecessary_files:\n        os.remove(file)\n"
  },
  {
    "path": "gokart/zip_client.py",
    "content": "from __future__ import annotations\n\nimport os\nimport shutil\nimport zipfile\nfrom abc import abstractmethod\nfrom typing import IO\n\n\ndef _unzip_file(fp: str | IO[bytes] | os.PathLike[str], extract_dir: str) -> None:\n    zip_file = zipfile.ZipFile(fp)\n    zip_file.extractall(extract_dir)\n    zip_file.close()\n\n\nclass ZipClient:\n    @abstractmethod\n    def exists(self) -> bool:\n        pass\n\n    @abstractmethod\n    def make_archive(self) -> None:\n        pass\n\n    @abstractmethod\n    def unpack_archive(self) -> None:\n        pass\n\n    @abstractmethod\n    def remove(self) -> None:\n        pass\n\n    @property\n    @abstractmethod\n    def path(self) -> str:\n        pass\n\n\nclass LocalZipClient(ZipClient):\n    def __init__(self, file_path: str, temporary_directory: str) -> None:\n        self._file_path = file_path\n        self._temporary_directory = temporary_directory\n\n    def exists(self) -> bool:\n        return os.path.exists(self._file_path)\n\n    def make_archive(self) -> None:\n        [base, extension] = os.path.splitext(self._file_path)\n        shutil.make_archive(base_name=base, format=extension[1:], root_dir=self._temporary_directory)\n\n    def unpack_archive(self) -> None:\n        _unzip_file(fp=self._file_path, extract_dir=self._temporary_directory)\n\n    def remove(self) -> None:\n        shutil.rmtree(self._file_path, ignore_errors=True)\n\n    @property\n    def path(self) -> str:\n        return self._file_path\n"
  },
  {
    "path": "gokart/zip_client_util.py",
    "content": "from __future__ import annotations\n\nfrom gokart.object_storage import ObjectStorage\nfrom gokart.zip_client import LocalZipClient, ZipClient\n\n\ndef make_zip_client(file_path: str, temporary_directory: str) -> ZipClient:\n    if ObjectStorage.if_object_storage_path(file_path):\n        return ObjectStorage.get_zip_client(file_path=file_path, temporary_directory=temporary_directory)\n    return LocalZipClient(file_path=file_path, temporary_directory=temporary_directory)\n"
  },
  {
    "path": "luigi.cfg",
    "content": "[core]\n  autoload_range: false\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nrequires = [\"hatchling\", \"uv-dynamic-versioning\"]\nbuild-backend = \"hatchling.build\"\n\n[project]\nname = \"gokart\"\ndescription=\"Gokart solves reproducibility, task dependencies, constraints of good code, and ease of use for Machine Learning Pipeline. [Documentation](https://gokart.readthedocs.io/en/latest/)\"\nauthors = [\n  {name = \"M3, inc.\"}\n]\nlicense = \"MIT\"\nreadme = \"README.md\"\nrequires-python = \">=3.10, <4\"\ndependencies = [\n  \"luigi>=3.8.0\",\n  \"boto3\",\n  \"slack-sdk\",\n  \"pandas\",\n  \"numpy\",\n  \"google-auth\",\n  \"pyarrow\",\n  \"google-api-python-client\",\n  \"APScheduler\",\n  \"redis\",\n  \"dill\",\n  \"backoff\",\n  \"typing-extensions>=4.11.0; python_version<'3.13'\",\n]\nclassifiers = [\n  \"Development Status :: 5 - Production/Stable\",\n  \"License :: OSI Approved :: MIT License\",\n  \"Programming Language :: Python :: 3\",\n  \"Programming Language :: Python :: 3.10\",\n  \"Programming Language :: Python :: 3.11\",\n  \"Programming Language :: Python :: 3.12\",\n  \"Programming Language :: Python :: 3.13\",\n  \"Programming Language :: Python :: 3.14\",\n]\ndynamic = [\"version\"]\n\n[project.optional-dependencies]\npolars = [\"polars>=0.19.0\"]\n\n[project.urls]\nHomepage = \"https://github.com/m3dev/gokart\"\nRepository = \"https://github.com/m3dev/gokart\"\nDocumentation = \"https://gokart.readthedocs.io/en/latest/\"\n\n[dependency-groups]\ntest = [\n  \"fakeredis\",\n  \"lupa\",\n  \"matplotlib\",\n  \"moto>=4.0\", # for use mock_aws api\n  \"mypy\",\n  \"polars>=0.19.0\",\n  \"pytest\",\n  \"pytest-cov\",\n  \"pytest-xdist\",\n  \"testfixtures\",\n  \"toml\",\n  \"types-redis\",\n  \"typing-extensions>=4.11.0\",\n]\n\nlint = [\n  \"ruff\",\n  \"mypy\",\n]\n\n[tool.uv]\ndefault-groups = ['test', 'lint']\ncache-keys = [ { file = \"pyproject.toml\" }, { git = true } ]\n\n[tool.hatch.version]\nsource = \"uv-dynamic-versioning\"\n\n[tool.uv-dynamic-versioning]\nenable = 
true\n\n[tool.hatch.build.targets.sdist]\ninclude = [\n  \"/LICENSE\",\n  \"/README.md\",\n  \"/examples\",\n  \"/gokart\",\n  \"/test\",\n]\n\n[tool.ruff]\nline-length = 160\nexclude = [\"venv/*\", \"tox/*\", \"examples/*\"]\n\n[tool.ruff.lint]\n# All the rules are listed on https://docs.astral.sh/ruff/rules/\nextend-select = [\n  \"B\", # bugbear\n  \"I\", # isort\n  \"UP\", # pyupgrade, upgrade syntax for newer versions of the language.\n]\n\n# B006: Do not use mutable data structures for argument defaults. They are created during function definition time. All calls to the function reuse this one instance of that data structure, persisting changes between them.\n# B008 Do not perform function calls in argument defaults.  The call is performed only once at function definition time. All calls to your function will reuse the result of that definition-time function call.  If this is intended, assign the function call to a module-level variable and use that variable as a default value.\nignore = [\"B006\", \"B008\"]\n\n[tool.ruff.format]\nquote-style = \"single\"\n\n[tool.mypy]\nignore_missing_imports = true\n\ncheck_untyped_defs = true\n\nwarn_unused_configs = true\nwarn_redundant_casts = true\nno_implicit_optional = true\nstrict_optional = true\nstrict_equality = true\nwarn_unused_ignores = true\nwarn_return_any = true\ndisallow_incomplete_defs = true\ndisallow_any_generics = true\n\n[tool.pytest.ini_options]\ntestpaths = [\"test\"]\naddopts = \"-n auto -s -v --durations=0\"\n"
  },
  {
    "path": "test/__init__.py",
    "content": ""
  },
  {
    "path": "test/config/__init__.py",
    "content": "from pathlib import Path\nfrom typing import Final\n\nCONFIG_DIR: Final[Path] = Path(__file__).parent.resolve()\nPYPROJECT_TOML: Final[Path] = CONFIG_DIR / 'pyproject.toml'\nPYPROJECT_TOML_SET_DISALLOW_MISSING_PARAMETERS: Final[Path] = CONFIG_DIR / 'pyproject_disallow_missing_parameters.toml'\nTEST_CONFIG_INI: Final[Path] = CONFIG_DIR / 'test_config.ini'\n"
  },
  {
    "path": "test/config/pyproject.toml",
    "content": "[tool.mypy]\nplugins = [\"gokart.mypy\"]\n\n[[tool.mypy.overrides]]\nignore_missing_imports = true\nmodule = [\"pandas.*\", \"apscheduler.*\", \"dill.*\", \"boto3.*\", \"testfixtures.*\", \"luigi.*\"]\n"
  },
  {
    "path": "test/config/pyproject_disallow_missing_parameters.toml",
    "content": "[tool.mypy]\nplugins = [\"gokart.mypy\"]\n\n[[tool.mypy.overrides]]\nignore_missing_imports = true\nmodule = [\"pandas.*\", \"apscheduler.*\", \"dill.*\", \"boto3.*\", \"testfixtures.*\", \"luigi.*\"]\n\n[tool.gokart-mypy]\ndisallow_missing_parameters = true\n"
  },
  {
    "path": "test/config/test_config.ini",
    "content": "[test_read_config._DummyTask]\nparam = ${test_param}\n\n[test_build._DummyTask]\nparam = ${test_param}"
  },
  {
    "path": "test/conflict_prevention_lock/__init__.py",
    "content": ""
  },
  {
    "path": "test/conflict_prevention_lock/test_task_lock.py",
    "content": "import random\nimport unittest\nfrom typing import Any\nfrom unittest.mock import patch\n\nimport gokart\nfrom gokart.conflict_prevention_lock.task_lock import RedisClient, TaskLockParams, make_task_lock_key, make_task_lock_params, make_task_lock_params_for_run\n\n\nclass TestRedisClient(unittest.TestCase):\n    @staticmethod\n    def _get_randint(host, port):\n        return random.randint(0, 100000)\n\n    def test_redis_client_is_singleton(self):\n        with patch('redis.Redis') as mock:\n            mock.side_effect = self._get_randint\n\n            redis_client_0_0 = RedisClient(host='host_0', port=123)\n            redis_client_1 = RedisClient(host='host_1', port=123)\n            redis_client_0_1 = RedisClient(host='host_0', port=123)\n\n            self.assertNotEqual(redis_client_0_0, redis_client_1)\n            self.assertEqual(redis_client_0_0, redis_client_0_1)\n\n            self.assertEqual(redis_client_0_0.get_redis_client(), redis_client_0_1.get_redis_client())\n\n\nclass TestMakeRedisKey(unittest.TestCase):\n    def test_make_redis_key(self):\n        result = make_task_lock_key(file_path='gs://test_ll/dir/fname.pkl', unique_id='12345')\n        self.assertEqual(result, 'fname_12345')\n\n\nclass TestMakeRedisParams(unittest.TestCase):\n    def test_make_task_lock_params_with_valid_host(self):\n        result = make_task_lock_params(\n            file_path='gs://aaa.pkl', unique_id='123', redis_host='0.0.0.0', redis_port=12345, redis_timeout=180, raise_task_lock_exception_on_collision=False\n        )\n        expected = TaskLockParams(\n            redis_host='0.0.0.0',\n            redis_port=12345,\n            redis_key='aaa_123',\n            should_task_lock=True,\n            redis_timeout=180,\n            raise_task_lock_exception_on_collision=False,\n            lock_extend_seconds=10,\n        )\n        self.assertEqual(result, expected)\n\n    def test_make_task_lock_params_with_no_host(self):\n        result = 
make_task_lock_params(\n            file_path='gs://aaa.pkl', unique_id='123', redis_host=None, redis_port=12345, redis_timeout=180, raise_task_lock_exception_on_collision=False\n        )\n        expected = TaskLockParams(\n            redis_host=None,\n            redis_port=12345,\n            redis_key='aaa_123',\n            should_task_lock=False,\n            redis_timeout=180,\n            raise_task_lock_exception_on_collision=False,\n            lock_extend_seconds=10,\n        )\n        self.assertEqual(result, expected)\n\n    def test_assert_when_redis_timeout_is_too_short(self):\n        with self.assertRaises(AssertionError):\n            make_task_lock_params(\n                file_path='test_dir/test_file.pkl',\n                unique_id='123abc',\n                redis_host='0.0.0.0',\n                redis_port=12345,\n                redis_timeout=2,\n            )\n\n\nclass TestMakeTaskLockParamsForRun(unittest.TestCase):\n    def test_make_task_lock_params_for_run(self):\n        class _SampleDummyTask(gokart.TaskOnKart[Any]):\n            pass\n\n        task_self = _SampleDummyTask(\n            redis_host='0.0.0.0',\n            redis_port=12345,\n            redis_timeout=180,\n        )\n\n        result = make_task_lock_params_for_run(task_self=task_self, lock_extend_seconds=10)\n        expected = TaskLockParams(\n            redis_host='0.0.0.0',\n            redis_port=12345,\n            redis_timeout=180,\n            redis_key='_SampleDummyTask_7e857f231830ca0fd6cf829d99f43961-run',\n            should_task_lock=True,\n            raise_task_lock_exception_on_collision=True,\n            lock_extend_seconds=10,\n        )\n\n        self.assertEqual(result, expected)\n"
  },
  {
    "path": "test/conflict_prevention_lock/test_task_lock_wrappers.py",
    "content": "import time\nimport unittest\nfrom unittest.mock import MagicMock, patch\n\nimport fakeredis\n\nfrom gokart.conflict_prevention_lock.task_lock import make_task_lock_params\nfrom gokart.conflict_prevention_lock.task_lock_wrappers import wrap_dump_with_lock, wrap_load_with_lock, wrap_remove_with_lock\n\n\ndef _sample_func_with_error(a: int, b: str) -> None:\n    raise Exception()\n\n\ndef _sample_long_func(a: int, b: str) -> dict[str, int | str]:\n    time.sleep(2.7)\n    return dict(a=a, b=b)\n\n\nclass TestWrapDumpWithLock(unittest.TestCase):\n    def test_no_redis(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host=None,\n            redis_port=None,\n        )\n        mock_func = MagicMock()\n        wrap_dump_with_lock(func=mock_func, task_lock_params=task_lock_params, exist_check=lambda: False)(123, b='abc')\n\n        mock_func.assert_called_once()\n        called_args, called_kwargs = mock_func.call_args\n        self.assertTupleEqual(called_args, (123,))\n        self.assertDictEqual(called_kwargs, dict(b='abc'))\n\n    def test_use_redis(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n            redis_port=12345,\n        )\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.side_effect = fakeredis.FakeRedis\n            mock_func = MagicMock()\n            wrap_dump_with_lock(func=mock_func, task_lock_params=task_lock_params, exist_check=lambda: False)(123, b='abc')\n\n            mock_func.assert_called_once()\n            called_args, called_kwargs = mock_func.call_args\n            self.assertTupleEqual(called_args, (123,))\n            self.assertDictEqual(called_kwargs, dict(b='abc'))\n\n    def 
test_if_func_is_skipped_when_cache_already_exists(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n            redis_port=12345,\n        )\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.side_effect = fakeredis.FakeRedis\n            mock_func = MagicMock()\n            wrap_dump_with_lock(func=mock_func, task_lock_params=task_lock_params, exist_check=lambda: True)(123, b='abc')\n\n            mock_func.assert_not_called()\n\n    def test_check_lock_extended(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n            redis_port=12345,\n            redis_timeout=2,\n            lock_extend_seconds=1,\n        )\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.side_effect = fakeredis.FakeRedis\n            wrap_dump_with_lock(func=_sample_long_func, task_lock_params=task_lock_params, exist_check=lambda: False)(123, b='abc')\n\n    def test_lock_is_removed_after_func_is_finished(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n            redis_port=12345,\n        )\n\n        server = fakeredis.FakeServer()\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.return_value = fakeredis.FakeRedis(server=server, host=task_lock_params.redis_host, port=task_lock_params.redis_port)\n            mock_func = MagicMock()\n            wrap_dump_with_lock(func=mock_func, task_lock_params=task_lock_params, exist_check=lambda: False)(123, b='abc')\n\n            mock_func.assert_called_once()\n           
 called_args, called_kwargs = mock_func.call_args\n            self.assertTupleEqual(called_args, (123,))\n            self.assertDictEqual(called_kwargs, dict(b='abc'))\n\n            fake_redis = fakeredis.FakeStrictRedis(server=server)\n            with self.assertRaises(KeyError):\n                fake_redis[task_lock_params.redis_key]\n\n    def test_lock_is_removed_after_func_is_finished_with_error(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n            redis_port=12345,\n        )\n\n        server = fakeredis.FakeServer()\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.return_value = fakeredis.FakeRedis(server=server, host=task_lock_params.redis_host, port=task_lock_params.redis_port)\n            try:\n                wrap_dump_with_lock(func=_sample_func_with_error, task_lock_params=task_lock_params, exist_check=lambda: False)(123, b='abc')\n            except Exception:\n                fake_redis = fakeredis.FakeStrictRedis(server=server)\n                with self.assertRaises(KeyError):\n                    fake_redis[task_lock_params.redis_key]\n\n\nclass TestWrapLoadWithLock(unittest.TestCase):\n    def test_no_redis(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host=None,\n            redis_port=None,\n        )\n        mock_func = MagicMock()\n        resulted = wrap_load_with_lock(func=mock_func, task_lock_params=task_lock_params)(123, b='abc')\n\n        mock_func.assert_called_once()\n        called_args, called_kwargs = mock_func.call_args\n        self.assertTupleEqual(called_args, (123,))\n        self.assertDictEqual(called_kwargs, dict(b='abc'))\n\n        self.assertEqual(resulted, mock_func())\n\n    def test_use_redis(self):\n 
       task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n            redis_port=12345,\n        )\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.side_effect = fakeredis.FakeRedis\n            mock_func = MagicMock()\n            resulted = wrap_load_with_lock(func=mock_func, task_lock_params=task_lock_params)(123, b='abc')\n\n            mock_func.assert_called_once()\n            called_args, called_kwargs = mock_func.call_args\n            self.assertTupleEqual(called_args, (123,))\n            self.assertDictEqual(called_kwargs, dict(b='abc'))\n\n            self.assertEqual(resulted, mock_func())\n\n    def test_check_lock_extended(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n            redis_port=12345,\n            redis_timeout=2,\n            lock_extend_seconds=1,\n        )\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.side_effect = fakeredis.FakeRedis\n            resulted = wrap_load_with_lock(func=_sample_long_func, task_lock_params=task_lock_params)(123, b='abc')\n            expected = dict(a=123, b='abc')\n            self.assertEqual(resulted, expected)\n\n    def test_lock_is_removed_after_func_is_finished(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n            redis_port=12345,\n        )\n\n        server = fakeredis.FakeServer()\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.return_value = fakeredis.FakeRedis(server=server, host=task_lock_params.redis_host, 
port=task_lock_params.redis_port)\n            mock_func = MagicMock()\n            resulted = wrap_load_with_lock(func=mock_func, task_lock_params=task_lock_params)(123, b='abc')\n\n            mock_func.assert_called_once()\n            called_args, called_kwargs = mock_func.call_args\n            self.assertTupleEqual(called_args, (123,))\n            self.assertDictEqual(called_kwargs, dict(b='abc'))\n            self.assertEqual(resulted, mock_func())\n\n            fake_redis = fakeredis.FakeStrictRedis(server=server)\n            with self.assertRaises(KeyError):\n                fake_redis[task_lock_params.redis_key]\n\n    def test_lock_is_removed_after_func_is_finished_with_error(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n            redis_port=12345,\n        )\n\n        server = fakeredis.FakeServer()\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.return_value = fakeredis.FakeRedis(server=server, host=task_lock_params.redis_host, port=task_lock_params.redis_port)\n            try:\n                wrap_load_with_lock(func=_sample_func_with_error, task_lock_params=task_lock_params)(123, b='abc')\n            except Exception:\n                fake_redis = fakeredis.FakeStrictRedis(server=server)\n                with self.assertRaises(KeyError):\n                    fake_redis[task_lock_params.redis_key]\n\n\nclass TestWrapRemoveWithLock(unittest.TestCase):\n    def test_no_redis(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host=None,\n            redis_port=None,\n        )\n        mock_func = MagicMock()\n        resulted = wrap_remove_with_lock(func=mock_func, task_lock_params=task_lock_params)(123, b='abc')\n\n        
mock_func.assert_called_once()\n        called_args, called_kwargs = mock_func.call_args\n        self.assertTupleEqual(called_args, (123,))\n        self.assertDictEqual(called_kwargs, dict(b='abc'))\n        self.assertEqual(resulted, mock_func())\n\n    def test_use_redis(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n            redis_port=12345,\n        )\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.side_effect = fakeredis.FakeRedis\n            mock_func = MagicMock()\n            resulted = wrap_remove_with_lock(func=mock_func, task_lock_params=task_lock_params)(123, b='abc')\n\n            mock_func.assert_called_once()\n            called_args, called_kwargs = mock_func.call_args\n            self.assertTupleEqual(called_args, (123,))\n            self.assertDictEqual(called_kwargs, dict(b='abc'))\n            self.assertEqual(resulted, mock_func())\n\n    def test_check_lock_extended(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n            redis_port=12345,\n            redis_timeout=2,\n            lock_extend_seconds=1,\n        )\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.side_effect = fakeredis.FakeRedis\n            resulted = wrap_remove_with_lock(func=_sample_long_func, task_lock_params=task_lock_params)(123, b='abc')\n            expected = dict(a=123, b='abc')\n            self.assertEqual(resulted, expected)\n\n    def test_lock_is_removed_after_func_is_finished(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n          
  redis_port=12345,\n        )\n\n        server = fakeredis.FakeServer()\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.return_value = fakeredis.FakeRedis(server=server, host=task_lock_params.redis_host, port=task_lock_params.redis_port)\n            mock_func = MagicMock()\n            resulted = wrap_remove_with_lock(func=mock_func, task_lock_params=task_lock_params)(123, b='abc')\n            mock_func.assert_called_once()\n            called_args, called_kwargs = mock_func.call_args\n            self.assertTupleEqual(called_args, (123,))\n            self.assertDictEqual(called_kwargs, dict(b='abc'))\n            self.assertEqual(resulted, mock_func())\n\n            fake_redis = fakeredis.FakeStrictRedis(server=server)\n            with self.assertRaises(KeyError):\n                fake_redis[task_lock_params.redis_key]\n\n    def test_lock_is_removed_after_func_is_finished_with_error(self):\n        task_lock_params = make_task_lock_params(\n            file_path='test_dir/test_file.pkl',\n            unique_id='123abc',\n            redis_host='0.0.0.0',\n            redis_port=12345,\n        )\n\n        server = fakeredis.FakeServer()\n\n        with patch('gokart.conflict_prevention_lock.task_lock.redis.Redis') as redis_mock:\n            redis_mock.return_value = fakeredis.FakeRedis(server=server, host=task_lock_params.redis_host, port=task_lock_params.redis_port)\n            try:\n                wrap_remove_with_lock(func=_sample_func_with_error, task_lock_params=task_lock_params)(123, b='abc')\n            except Exception:\n                fake_redis = fakeredis.FakeStrictRedis(server=server)\n                with self.assertRaises(KeyError):\n                    fake_redis[task_lock_params.redis_key]\n"
  },
  {
    "path": "test/file_processor/__init__.py",
    "content": ""
  },
  {
    "path": "test/file_processor/test_base.py",
    "content": "\"\"\"Tests for base file processors (non-DataFrame processors).\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport tempfile\nimport unittest\nfrom collections.abc import Callable\n\nimport boto3\nfrom luigi import LocalTarget\nfrom moto import mock_aws\n\nfrom gokart.file_processor import PickleFileProcessor\nfrom gokart.object_storage import ObjectStorage\n\n\nclass TestPickleFileProcessor(unittest.TestCase):\n    def test_dump_and_load_normal_obj(self):\n        var = 'abc'\n        processor = PickleFileProcessor()\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.pkl'\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(var, f)\n            with local_target.open('r') as f:\n                loaded = processor.load(f)\n\n        self.assertEqual(loaded, var)\n\n    def test_dump_and_load_class(self):\n        import functools\n\n        def plus1(func: Callable[..., int]) -> Callable[..., int]:\n            @functools.wraps(func)\n            def wrapped() -> int:\n                ret = func()\n                return ret + 1\n\n            return wrapped\n\n        class A:\n            def __init__(self) -> None:\n                self.run = plus1(self.run)  # type: ignore\n\n            def run(self) -> int:  # type: ignore\n                return 1\n\n        obj = A()\n        processor = PickleFileProcessor()\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.pkl'\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(obj, f)\n            with local_target.open('r') as f:\n                loaded = processor.load(f)\n\n        self.assertEqual(loaded.run(), obj.run())\n\n    @mock_aws\n    def 
test_dump_and_load_with_readables3file(self):\n        conn = boto3.resource('s3', region_name='us-east-1')\n        conn.create_bucket(Bucket='test')\n        file_path = os.path.join('s3://test/', 'test.pkl')\n\n        var = 'abc'\n        processor = PickleFileProcessor()\n\n        target = ObjectStorage.get_object_storage_target(file_path, processor.format())\n        with target.open('w') as f:\n            processor.dump(var, f)\n        with target.open('r') as f:\n            loaded = processor.load(f)\n\n        self.assertEqual(loaded, var)\n"
  },
  {
    "path": "test/file_processor/test_factory.py",
    "content": "\"\"\"Tests for file processor factory function.\"\"\"\n\nfrom __future__ import annotations\n\nimport unittest\n\nfrom gokart.file_processor import (\n    CsvFileProcessor,\n    FeatherFileProcessor,\n    GzipFileProcessor,\n    JsonFileProcessor,\n    NpzFileProcessor,\n    ParquetFileProcessor,\n    TextFileProcessor,\n    make_file_processor,\n)\n\n\nclass TestMakeFileProcessor(unittest.TestCase):\n    def test_make_file_processor_with_txt_extension(self):\n        processor = make_file_processor('test.txt', store_index_in_feather=False)\n        self.assertIsInstance(processor, TextFileProcessor)\n\n    def test_make_file_processor_with_csv_extension(self):\n        processor = make_file_processor('test.csv', store_index_in_feather=False)\n        self.assertIsInstance(processor, CsvFileProcessor)\n\n    def test_make_file_processor_with_gz_extension(self):\n        processor = make_file_processor('test.gz', store_index_in_feather=False)\n        self.assertIsInstance(processor, GzipFileProcessor)\n\n    def test_make_file_processor_with_json_extension(self):\n        processor = make_file_processor('test.json', store_index_in_feather=False)\n        self.assertIsInstance(processor, JsonFileProcessor)\n\n    def test_make_file_processor_with_ndjson_extension(self):\n        processor = make_file_processor('test.ndjson', store_index_in_feather=False)\n        self.assertIsInstance(processor, JsonFileProcessor)\n\n    def test_make_file_processor_with_npz_extension(self):\n        processor = make_file_processor('test.npz', store_index_in_feather=False)\n        self.assertIsInstance(processor, NpzFileProcessor)\n\n    def test_make_file_processor_with_parquet_extension(self):\n        processor = make_file_processor('test.parquet', store_index_in_feather=False)\n        self.assertIsInstance(processor, ParquetFileProcessor)\n\n    def test_make_file_processor_with_feather_extension(self):\n        processor = make_file_processor('test.feather', 
store_index_in_feather=True)\n        self.assertIsInstance(processor, FeatherFileProcessor)\n\n    def test_make_file_processor_with_unsupported_extension(self):\n        with self.assertRaises(AssertionError):\n            make_file_processor('test.unsupported', store_index_in_feather=False)\n"
  },
  {
    "path": "test/file_processor/test_pandas.py",
    "content": "\"\"\"Tests for pandas-specific file processors.\"\"\"\n\nfrom __future__ import annotations\n\nimport tempfile\nimport unittest\n\nimport pandas as pd\nimport pytest\nfrom luigi import LocalTarget\n\nfrom gokart.file_processor import CsvFileProcessor, FeatherFileProcessor, JsonFileProcessor\n\n\nclass TestCsvFileProcessor(unittest.TestCase):\n    def test_dump_csv_with_utf8(self):\n        df = pd.DataFrame({'あ': [1, 2, 3], 'い': [4, 5, 6]})\n        processor = CsvFileProcessor()\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.csv'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            # read with utf-8 to check if the file is dumped with utf8\n            loaded_df = pd.read_csv(temp_path, encoding='utf-8')\n            pd.testing.assert_frame_equal(df, loaded_df)\n\n    def test_dump_csv_with_cp932(self):\n        df = pd.DataFrame({'あ': [1, 2, 3], 'い': [4, 5, 6]})\n        processor = CsvFileProcessor(encoding='cp932')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.csv'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            # read with cp932 to check if the file is dumped with cp932\n            loaded_df = pd.read_csv(temp_path, encoding='cp932')\n            pd.testing.assert_frame_equal(df, loaded_df)\n\n    def test_load_csv_with_utf8(self):\n        df = pd.DataFrame({'あ': [1, 2, 3], 'い': [4, 5, 6]})\n        processor = CsvFileProcessor()\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.csv'\n            df.to_csv(temp_path, encoding='utf-8', index=False)\n\n            local_target = LocalTarget(path=temp_path, 
format=processor.format())\n            with local_target.open('r') as f:\n                # read with utf-8 to check if the file is dumped with utf8\n                loaded_df = processor.load(f)\n                pd.testing.assert_frame_equal(df, loaded_df)\n\n    def test_load_csv_with_cp932(self):\n        df = pd.DataFrame({'あ': [1, 2, 3], 'い': [4, 5, 6]})\n        processor = CsvFileProcessor(encoding='cp932')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.csv'\n            df.to_csv(temp_path, encoding='cp932', index=False)\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('r') as f:\n                # read with cp932 to check if the file is dumped with cp932\n                loaded_df = processor.load(f)\n                pd.testing.assert_frame_equal(df, loaded_df)\n\n\nclass TestJsonFileProcessor:\n    @pytest.mark.parametrize(\n        'orient,input_data,expected_json',\n        [\n            pytest.param(\n                None,\n                pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),\n                '{\"A\":{\"0\":1,\"1\":2,\"2\":3},\"B\":{\"0\":4,\"1\":5,\"2\":6}}',\n                id='With Default Orient for DataFrame',\n            ),\n            pytest.param(\n                'records',\n                pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),\n                '{\"A\":1,\"B\":4}\\n{\"A\":2,\"B\":5}\\n{\"A\":3,\"B\":6}\\n',\n                id='With Records Orient for DataFrame',\n            ),\n            pytest.param(None, {'A': [1, 2, 3], 'B': [4, 5, 6]}, '{\"A\":{\"0\":1,\"1\":2,\"2\":3},\"B\":{\"0\":4,\"1\":5,\"2\":6}}', id='With Default Orient for Dict'),\n            pytest.param('records', {'A': [1, 2, 3], 'B': [4, 5, 6]}, '{\"A\":1,\"B\":4}\\n{\"A\":2,\"B\":5}\\n{\"A\":3,\"B\":6}\\n', id='With Records Orient for Dict'),\n            pytest.param(None, {}, '{}', id='With Default Orient for Empty 
Dict'),\n            pytest.param('records', {}, '\\n', id='With Records Orient for Empty Dict'),\n        ],\n    )\n    def test_dump_and_load_json(self, orient, input_data, expected_json):\n        processor = JsonFileProcessor(orient=orient)\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.json'\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(input_data, f)\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n                f.seek(0)\n                loaded_json = f.read().decode('utf-8')\n\n        assert loaded_json == expected_json\n\n        df_input = pd.DataFrame(input_data)\n        pd.testing.assert_frame_equal(df_input, loaded_df)\n\n\nclass TestFeatherFileProcessor(unittest.TestCase):\n    def test_feather_should_return_same_dataframe(self):\n        df = pd.DataFrame({'a': [1]})\n        processor = FeatherFileProcessor(store_index_in_feather=True)\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.feather'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            pd.testing.assert_frame_equal(df, loaded_df)\n\n    def test_feather_should_save_index_name(self):\n        df = pd.DataFrame({'a': [1]}, index=pd.Index([1], name='index_name'))\n        processor = FeatherFileProcessor(store_index_in_feather=True)\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.feather'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                
processor.dump(df, f)\n\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            pd.testing.assert_frame_equal(df, loaded_df)\n\n    def test_feather_should_raise_error_index_name_is_None(self):\n        df = pd.DataFrame({'a': [1]}, index=pd.Index([1], name='None'))\n        processor = FeatherFileProcessor(store_index_in_feather=True)\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.feather'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                with self.assertRaises(AssertionError):\n                    processor.dump(df, f)\n"
  },
  {
    "path": "test/file_processor/test_polars.py",
    "content": "\"\"\"Tests for polars-specific file processors.\"\"\"\n\nfrom __future__ import annotations\n\nimport tempfile\nfrom typing import TYPE_CHECKING\n\nimport pandas as pd\nimport pytest\nfrom luigi import LocalTarget\n\nfrom gokart.file_processor import CsvFileProcessor, FeatherFileProcessor, JsonFileProcessor, ParquetFileProcessor\n\nif TYPE_CHECKING:\n    import polars as pl\n\ntry:\n    import polars as pl\n\n    HAS_POLARS = True\nexcept ImportError:\n    HAS_POLARS = False\n\n\n@pytest.mark.skipif(not HAS_POLARS, reason='polars not installed')\nclass TestCsvFileProcessorWithPolars:\n    \"\"\"Tests for CsvFileProcessor with polars support\"\"\"\n\n    def test_dump_polars_dataframe(self):\n        \"\"\"Test dumping a polars DataFrame\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = CsvFileProcessor(dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.csv'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            # Verify file was created and can be read by polars\n            loaded_df = pl.read_csv(temp_path)\n            assert loaded_df.equals(df)\n\n    def test_load_polars_dataframe(self):\n        \"\"\"Test loading a CSV as polars DataFrame\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = CsvFileProcessor(dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.csv'\n            df.write_csv(temp_path)\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            assert isinstance(loaded_df, pl.DataFrame)\n            assert loaded_df.equals(df)\n\n    def 
test_dump_and_load_polars_roundtrip(self):\n        \"\"\"Test roundtrip dump and load with polars\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = CsvFileProcessor(dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.csv'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            assert isinstance(loaded_df, pl.DataFrame)\n            assert loaded_df.equals(df)\n\n    def test_dump_polars_with_pandas_load(self):\n        \"\"\"Test that polars dump can be loaded by pandas processor\"\"\"\n        df_polars = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor_polars = CsvFileProcessor(dataframe_type='polars')\n        processor_pandas = CsvFileProcessor(dataframe_type='pandas')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.csv'\n\n            # Dump with polars\n            local_target = LocalTarget(path=temp_path, format=processor_polars.format())\n            with local_target.open('w') as f:\n                processor_polars.dump(df_polars, f)\n\n            # Load with pandas\n            with local_target.open('r') as f:\n                loaded_df = processor_pandas.load(f)\n\n            assert isinstance(loaded_df, pd.DataFrame)\n            # Compare values\n            df_polars.equals(pl.from_pandas(loaded_df))\n\n    def test_polars_with_different_separator(self):\n        \"\"\"Test polars with TSV (tab-separated values)\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = CsvFileProcessor(sep='\\t', dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = 
f'{temp_dir}/temp.tsv'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            assert isinstance(loaded_df, pl.DataFrame)\n            assert loaded_df.equals(df)\n\n    def test_error_when_polars_not_available_for_load(self):\n        \"\"\"Test error message when polars is requested but a polars operation fails\"\"\"\n        # This test is a bit tricky since polars IS installed in this test class\n        # We'll just verify the processor accepts the parameter\n        processor = CsvFileProcessor(dataframe_type='polars')\n        assert processor._dataframe_type == 'polars'\n\n\n@pytest.mark.skipif(not HAS_POLARS, reason='polars not installed')\nclass TestJsonFileProcessorWithPolars:\n    \"\"\"Tests for JsonFileProcessor with polars support\"\"\"\n\n    def test_dump_polars_dataframe(self):\n        \"\"\"Test dumping a polars DataFrame to JSON\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = JsonFileProcessor(orient=None, dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.json'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            # Verify file was created and can be read by polars\n            loaded_df = pl.read_json(temp_path)\n            assert loaded_df.equals(df)\n\n    def test_load_polars_dataframe(self):\n        \"\"\"Test loading a JSON as polars DataFrame\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = JsonFileProcessor(orient=None, dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = 
f'{temp_dir}/temp.json'\n            df.write_json(temp_path)\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            assert isinstance(loaded_df, pl.DataFrame)\n            assert loaded_df.equals(df)\n\n    def test_dump_and_load_polars_roundtrip(self):\n        \"\"\"Test roundtrip dump and load with polars\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = JsonFileProcessor(orient=None, dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.json'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            assert isinstance(loaded_df, pl.DataFrame)\n            assert loaded_df.equals(df)\n\n    def test_dump_and_load_ndjson_with_polars(self):\n        \"\"\"Test ndjson (records orient) with polars\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = JsonFileProcessor(orient='records', dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.ndjson'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            assert isinstance(loaded_df, pl.DataFrame)\n            assert loaded_df.equals(df)\n\n    def test_dump_polars_with_pandas_load(self):\n        \"\"\"Test that polars dump can be loaded by pandas processor\"\"\"\n        df_polars = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 
6]})\n        processor_polars = JsonFileProcessor(orient=None, dataframe_type='polars')\n        processor_pandas = JsonFileProcessor(orient=None, dataframe_type='pandas')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.json'\n\n            # Dump with polars\n            local_target = LocalTarget(path=temp_path, format=processor_polars.format())\n            with local_target.open('w') as f:\n                processor_polars.dump(df_polars, f)\n\n            # Load with pandas\n            with local_target.open('r') as f:\n                loaded_df = processor_pandas.load(f)\n\n            assert isinstance(loaded_df, pd.DataFrame)\n            # Compare values\n            assert list(loaded_df['a']) == [1, 2, 3]\n            assert list(loaded_df['b']) == [4, 5, 6]\n\n\n@pytest.mark.skipif(not HAS_POLARS, reason='polars not installed')\nclass TestParquetFileProcessorWithPolars:\n    \"\"\"Tests for ParquetFileProcessor with polars support\"\"\"\n\n    def test_dump_polars_dataframe(self):\n        \"\"\"Test dumping a polars DataFrame to Parquet\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = ParquetFileProcessor(dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.parquet'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            # Verify file was created and can be read by polars\n            loaded_df = pl.read_parquet(temp_path)\n            assert loaded_df.equals(df)\n\n    def test_load_polars_dataframe(self):\n        \"\"\"Test loading a Parquet as polars DataFrame\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = ParquetFileProcessor(dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n     
       temp_path = f'{temp_dir}/temp.parquet'\n            df.write_parquet(temp_path)\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            assert isinstance(loaded_df, pl.DataFrame)\n            assert loaded_df.equals(df)\n\n    def test_dump_and_load_polars_roundtrip(self):\n        \"\"\"Test roundtrip dump and load with polars\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = ParquetFileProcessor(dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.parquet'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            assert isinstance(loaded_df, pl.DataFrame)\n            assert loaded_df.equals(df)\n\n    def test_dump_polars_with_pandas_load(self):\n        \"\"\"Test that polars dump can be loaded by pandas processor\"\"\"\n        df_polars = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor_polars = ParquetFileProcessor(dataframe_type='polars')\n        processor_pandas = ParquetFileProcessor(dataframe_type='pandas')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.parquet'\n\n            # Dump with polars\n            local_target = LocalTarget(path=temp_path, format=processor_polars.format())\n            with local_target.open('w') as f:\n                processor_polars.dump(df_polars, f)\n\n            # Load with pandas\n            with local_target.open('r') as f:\n                loaded_df = processor_pandas.load(f)\n\n            assert isinstance(loaded_df, pd.DataFrame)\n            
assert df_polars.equals(pl.from_pandas(loaded_df))\n\n    def test_parquet_with_compression(self):\n        \"\"\"Test polars with parquet compression\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = ParquetFileProcessor(compression='gzip', dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.parquet'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            assert isinstance(loaded_df, pl.DataFrame)\n            assert loaded_df.equals(df)\n\n\n@pytest.mark.skipif(not HAS_POLARS, reason='polars not installed')\nclass TestFeatherFileProcessorWithPolars:\n    \"\"\"Tests for FeatherFileProcessor with polars support\"\"\"\n\n    def test_dump_polars_dataframe(self):\n        \"\"\"Test dumping a polars DataFrame to Feather\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = FeatherFileProcessor(store_index_in_feather=False, dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.feather'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            # Verify file was created and can be read by polars\n            loaded_df = pl.read_ipc(temp_path)\n            assert loaded_df.equals(df)\n\n    def test_load_polars_dataframe(self):\n        \"\"\"Test loading a Feather as polars DataFrame\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = FeatherFileProcessor(store_index_in_feather=False, dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n          
  temp_path = f'{temp_dir}/temp.feather'\n            df.write_ipc(temp_path)\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            assert isinstance(loaded_df, pl.DataFrame)\n            assert loaded_df.equals(df)\n\n    def test_dump_and_load_polars_roundtrip(self):\n        \"\"\"Test roundtrip dump and load with polars\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = FeatherFileProcessor(store_index_in_feather=False, dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.feather'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(df, f)\n\n            with local_target.open('r') as f:\n                loaded_df = processor.load(f)\n\n            assert isinstance(loaded_df, pl.DataFrame)\n            assert loaded_df.equals(df)\n\n    def test_dump_polars_with_pandas_load(self):\n        \"\"\"Test that polars dump can be loaded by pandas processor\"\"\"\n        df_polars = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor_polars = FeatherFileProcessor(store_index_in_feather=False, dataframe_type='polars')\n        processor_pandas = FeatherFileProcessor(store_index_in_feather=False, dataframe_type='pandas')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.feather'\n\n            # Dump with polars\n            local_target = LocalTarget(path=temp_path, format=processor_polars.format())\n            with local_target.open('w') as f:\n                processor_polars.dump(df_polars, f)\n\n            # Load with pandas\n            with local_target.open('r') as f:\n                loaded_df = processor_pandas.load(f)\n\n            
assert isinstance(loaded_df, pd.DataFrame)\n            # Compare values\n            assert df_polars.equals(pl.from_pandas(loaded_df))\n\n\n@pytest.mark.skipif(not HAS_POLARS, reason='polars not installed')\nclass TestLazyFrameSupport:\n    \"\"\"Tests for LazyFrame support in file processors using dataframe_type='polars-lazy'\"\"\"\n\n    def test_csv_load_lazy(self):\n        \"\"\"Test loading CSV as LazyFrame\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = CsvFileProcessor(dataframe_type='polars-lazy')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.csv'\n            df.write_csv(temp_path)\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('r') as f:\n                loaded = processor.load(f)\n\n            assert isinstance(loaded, pl.LazyFrame)\n            assert loaded.collect().equals(df)\n\n    def test_csv_dump_lazyframe(self):\n        \"\"\"Test dumping a LazyFrame to CSV\"\"\"\n        lf = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}).lazy()\n        processor = CsvFileProcessor(dataframe_type='polars-lazy')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.csv'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(lf, f)\n\n            # Verify file was created and can be read\n            loaded_df = pl.read_csv(temp_path)\n            assert loaded_df.equals(lf.collect())\n\n    def test_parquet_load_lazy(self):\n        \"\"\"Test loading Parquet as LazyFrame\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = ParquetFileProcessor(dataframe_type='polars-lazy')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.parquet'\n            
df.write_parquet(temp_path)\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('r') as f:\n                loaded = processor.load(f)\n\n            assert isinstance(loaded, pl.LazyFrame)\n            assert loaded.collect().equals(df)\n\n    def test_parquet_dump_lazyframe(self):\n        \"\"\"Test dumping a LazyFrame to Parquet\"\"\"\n        lf = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}).lazy()\n        processor = ParquetFileProcessor(dataframe_type='polars-lazy')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.parquet'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(lf, f)\n\n            # Verify file was created and can be read\n            loaded_df = pl.read_parquet(temp_path)\n            assert loaded_df.equals(lf.collect())\n\n    def test_feather_load_lazy(self):\n        \"\"\"Test loading Feather as LazyFrame\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = FeatherFileProcessor(store_index_in_feather=False, dataframe_type='polars-lazy')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.feather'\n            df.write_ipc(temp_path)\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('r') as f:\n                loaded = processor.load(f)\n\n            assert isinstance(loaded, pl.LazyFrame)\n            assert loaded.collect().equals(df)\n\n    def test_feather_dump_lazyframe(self):\n        \"\"\"Test dumping a LazyFrame to Feather\"\"\"\n        lf = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}).lazy()\n        processor = FeatherFileProcessor(store_index_in_feather=False, dataframe_type='polars-lazy')\n\n        with tempfile.TemporaryDirectory() as 
temp_dir:\n            temp_path = f'{temp_dir}/temp.feather'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(lf, f)\n\n            # Verify file was created and can be read\n            loaded_df = pl.read_ipc(temp_path)\n            assert loaded_df.equals(lf.collect())\n\n    def test_json_load_lazy_ndjson(self):\n        \"\"\"Test loading NDJSON as LazyFrame\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = JsonFileProcessor(orient='records', dataframe_type='polars-lazy')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.ndjson'\n            df.write_ndjson(temp_path)\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('r') as f:\n                loaded = processor.load(f)\n\n            assert isinstance(loaded, pl.LazyFrame)\n            assert loaded.collect().equals(df)\n\n    def test_json_dump_lazyframe_ndjson(self):\n        \"\"\"Test dumping a LazyFrame to NDJSON\"\"\"\n        lf = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}).lazy()\n        processor = JsonFileProcessor(orient='records', dataframe_type='polars-lazy')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.ndjson'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(lf, f)\n\n            # Verify file was created and can be read\n            loaded_df = pl.read_ndjson(temp_path)\n            assert loaded_df.equals(lf.collect())\n\n    def test_json_load_lazy_standard(self):\n        \"\"\"Test loading standard JSON (orient=None) as LazyFrame\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = 
JsonFileProcessor(orient=None, dataframe_type='polars-lazy')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.json'\n            df.write_json(temp_path)\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('r') as f:\n                loaded = processor.load(f)\n\n            assert isinstance(loaded, pl.LazyFrame)\n            assert loaded.collect().equals(df)\n\n    def test_json_dump_lazyframe_standard(self):\n        \"\"\"Test dumping a LazyFrame to standard JSON (orient=None)\"\"\"\n        lf = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}).lazy()\n        processor = JsonFileProcessor(orient=None, dataframe_type='polars-lazy')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.json'\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('w') as f:\n                processor.dump(lf, f)\n\n            # Verify file was created and can be read\n            loaded_df = pl.read_json(temp_path)\n            assert loaded_df.equals(lf.collect())\n\n    def test_polars_returns_dataframe(self):\n        \"\"\"Test that dataframe_type='polars' returns DataFrame (not LazyFrame)\"\"\"\n        df = pl.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n        processor = ParquetFileProcessor(dataframe_type='polars')\n\n        with tempfile.TemporaryDirectory() as temp_dir:\n            temp_path = f'{temp_dir}/temp.parquet'\n            df.write_parquet(temp_path)\n\n            local_target = LocalTarget(path=temp_path, format=processor.format())\n            with local_target.open('r') as f:\n                loaded = processor.load(f)\n\n            assert isinstance(loaded, pl.DataFrame)\n            assert loaded.equals(df)\n"
  },
  {
    "path": "test/in_memory/test_in_memory_target.py",
    "content": "from datetime import datetime\nfrom time import sleep\n\nimport pytest\n\nfrom gokart.conflict_prevention_lock.task_lock import TaskLockParams\nfrom gokart.in_memory import InMemoryCacheRepository, InMemoryTarget, make_in_memory_target\n\n\nclass TestInMemoryTarget:\n    @pytest.fixture\n    def task_lock_params(self) -> TaskLockParams:\n        return TaskLockParams(\n            redis_host=None,\n            redis_port=None,\n            redis_timeout=None,\n            redis_key='dummy',\n            should_task_lock=False,\n            raise_task_lock_exception_on_collision=False,\n            lock_extend_seconds=0,\n        )\n\n    @pytest.fixture\n    def target(self, task_lock_params: TaskLockParams) -> InMemoryTarget:\n        return make_in_memory_target(target_key='dummy_key', task_lock_params=task_lock_params)\n\n    @pytest.fixture(autouse=True)\n    def clear_repo(self) -> None:\n        InMemoryCacheRepository().clear()\n\n    def test_dump_and_load_data(self, target: InMemoryTarget) -> None:\n        dumped = 'dummy_data'\n        target.dump(dumped)\n        loaded = target.load()\n        assert loaded == dumped\n\n    def test_exist(self, target: InMemoryTarget) -> None:\n        assert not target.exists()\n        target.dump('dummy_data')\n        assert target.exists()\n\n    def test_last_modified_time(self, target: InMemoryTarget) -> None:\n        input = 'dummy_data'\n        target.dump(input)\n        time = target.last_modification_time()\n        assert isinstance(time, datetime)\n\n        sleep(0.1)\n        another_input = 'another_data'\n        target.dump(another_input)\n        another_time = target.last_modification_time()\n        assert time < another_time\n\n        target.remove()\n        with pytest.raises(ValueError):\n            assert target.last_modification_time()\n"
  },
  {
    "path": "test/in_memory/test_repository.py",
    "content": "import time\n\nimport pytest\n\nfrom gokart.in_memory import InMemoryCacheRepository as Repo\n\ndummy_num = 100\n\n\nclass TestInMemoryCacheRepository:\n    @pytest.fixture\n    def repo(self) -> Repo:\n        repo = Repo()\n        repo.clear()\n        return repo\n\n    def test_set(self, repo: Repo) -> None:\n        repo.set_value('dummy_key', dummy_num)\n        assert repo.size == 1\n        for key, value in repo.get_gen():\n            assert (key, value) == ('dummy_key', dummy_num)\n\n        repo.set_value('another_key', 'another_value')\n        assert repo.size == 2\n\n    def test_get(self, repo: Repo) -> None:\n        repo.set_value('dummy_key', dummy_num)\n        repo.set_value('another_key', 'another_value')\n\n        \"\"\"Raise Error when key doesn't exist.\"\"\"\n        with pytest.raises(KeyError):\n            repo.get_value('not_exist_key')\n\n        assert repo.get_value('dummy_key') == dummy_num\n        assert repo.get_value('another_key') == 'another_value'\n\n    def test_empty(self, repo: Repo) -> None:\n        assert repo.empty()\n        repo.set_value('dummmy_key', dummy_num)\n        assert not repo.empty()\n\n    def test_has(self, repo: Repo) -> None:\n        assert not repo.has('dummy_key')\n        repo.set_value('dummy_key', dummy_num)\n        assert repo.has('dummy_key')\n        assert not repo.has('not_exist_key')\n\n    def test_remove(self, repo: Repo) -> None:\n        repo.set_value('dummy_key', dummy_num)\n\n        with pytest.raises(AssertionError):\n            repo.remove('not_exist_key')\n\n        repo.remove('dummy_key')\n        assert not repo.has('dummy_key')\n\n    def test_last_modification_time(self, repo: Repo) -> None:\n        repo.set_value('dummy_key', dummy_num)\n        date1 = repo.get_last_modification_time('dummy_key')\n        time.sleep(0.1)\n        repo.set_value('dummy_key', dummy_num)\n        date2 = repo.get_last_modification_time('dummy_key')\n        assert date1 
< date2\n"
  },
  {
    "path": "test/slack/__init__.py",
    "content": ""
  },
  {
    "path": "test/slack/test_slack_api.py",
    "content": "import unittest\nfrom logging import getLogger\nfrom unittest import mock\nfrom unittest.mock import MagicMock\n\nfrom slack_sdk import WebClient\nfrom slack_sdk.web.slack_response import SlackResponse\nfrom testfixtures import LogCapture\n\nimport gokart.slack\n\nlogger = getLogger(__name__)\n\n\ndef _slack_response(token, data):\n    return SlackResponse(\n        client=WebClient(token=token), http_verb='POST', api_url='http://localhost:3000/api.test', req_args={}, data=data, headers={}, status_code=200\n    )\n\n\nclass TestSlackAPI(unittest.TestCase):\n    @mock.patch('gokart.slack.slack_api.slack_sdk.WebClient')\n    def test_initialization_with_invalid_token(self, patch):\n        def _conversations_list(params={}):\n            return _slack_response(token='invalid', data={'ok': False, 'error': 'error_reason'})\n\n        mock_client = MagicMock()\n        mock_client.conversations_list = MagicMock(side_effect=_conversations_list)\n        patch.return_value = mock_client\n\n        with LogCapture() as log:\n            gokart.slack.SlackAPI(token='invalid', channel='test', to_user='test user')\n            log.check(('gokart.slack.slack_api', 'WARNING', 'The job will start without slack notification: Channel test is not found in public channels.'))\n\n    @mock.patch('gokart.slack.slack_api.slack_sdk.WebClient')\n    def test_invalid_channel(self, patch):\n        def _conversations_list(params={}):\n            return _slack_response(\n                token='valid', data={'ok': True, 'channels': [{'name': 'valid', 'id': 'valid_id'}], 'response_metadata': {'next_cursor': ''}}\n            )\n\n        mock_client = MagicMock()\n        mock_client.conversations_list = MagicMock(side_effect=_conversations_list)\n        patch.return_value = mock_client\n\n        with LogCapture() as log:\n            gokart.slack.SlackAPI(token='valid', channel='invalid_channel', to_user='test user')\n            log.check(\n                
('gokart.slack.slack_api', 'WARNING', 'The job will start without slack notification: Channel invalid_channel is not found in public channels.')\n            )\n\n    @mock.patch('gokart.slack.slack_api.slack_sdk.WebClient')\n    def test_send_snippet_with_invalid_token(self, patch):\n        def _conversations_list(params={}):\n            return _slack_response(\n                token='valid', data={'ok': True, 'channels': [{'name': 'valid', 'id': 'valid_id'}], 'response_metadata': {'next_cursor': ''}}\n            )\n\n        def _api_call(method, data={}):\n            assert method == 'files.upload'\n            return {'ok': False, 'error': 'error_reason'}\n\n        mock_client = MagicMock()\n        mock_client.conversations_list = MagicMock(side_effect=_conversations_list)\n        mock_client.api_call = MagicMock(side_effect=_api_call)\n        patch.return_value = mock_client\n\n        with LogCapture() as log:\n            api = gokart.slack.SlackAPI(token='valid', channel='valid', to_user='test user')\n            api.send_snippet(comment='test', title='title', content='content')\n            log.check(\n                ('gokart.slack.slack_api', 'WARNING', 'Failed to send slack notification: Error while uploading file. 
The error reason is \"error_reason\".')\n            )\n\n    @mock.patch('gokart.slack.slack_api.slack_sdk.WebClient')\n    def test_send(self, patch):\n        def _conversations_list(params={}):\n            return _slack_response(\n                token='valid', data={'ok': True, 'channels': [{'name': 'valid', 'id': 'valid_id'}], 'response_metadata': {'next_cursor': ''}}\n            )\n\n        def _api_call(method, data={}):\n            assert method == 'files.upload'\n            return {'ok': False, 'error': 'error_reason'}\n\n        mock_client = MagicMock()\n        mock_client.conversations_list = MagicMock(side_effect=_conversations_list)\n        mock_client.api_call = MagicMock(side_effect=_api_call)\n        patch.return_value = mock_client\n\n        api = gokart.slack.SlackAPI(token='valid', channel='valid', to_user='test user')\n        api.send_snippet(comment='test', title='title', content='content')\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/test_build.py",
    "content": "from __future__ import annotations\n\nimport io\nimport logging\nimport os\nimport sys\nimport unittest\nfrom copy import copy\nfrom typing import Any\n\nif sys.version_info >= (3, 11):\n    from typing import assert_type\nelse:\n    from typing_extensions import assert_type\n\nfrom unittest.mock import patch\n\nimport luigi\nimport luigi.mock\n\nimport gokart\nfrom gokart.build import GokartBuildError, LoggerConfig, TaskDumpConfig, TaskDumpMode, TaskDumpOutputType, process_task_info\nfrom gokart.conflict_prevention_lock.task_lock import TaskLockException\n\n\nclass _DummyTask(gokart.TaskOnKart[str]):\n    task_namespace = __name__\n    param: luigi.Parameter = luigi.Parameter()\n\n    def output(self):\n        return self.make_target('./test/dummy.pkl')\n\n    def run(self):\n        self.dump(self.param)\n\n\nclass _DummyTaskTwoOutputs(gokart.TaskOnKart[dict[str, str]]):\n    task_namespace = __name__\n    param1: luigi.Parameter = luigi.Parameter()\n    param2: luigi.Parameter = luigi.Parameter()\n\n    def output(self):\n        return {'out1': self.make_target('./test/dummy1.pkl'), 'out2': self.make_target('./test/dummy2.pkl')}\n\n    def run(self):\n        self.dump(self.param1, 'out1')\n        self.dump(self.param2, 'out2')\n\n\nclass _DummyFailedTask(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n\n    def run(self):\n        raise RuntimeError\n\n\nclass _ParallelRunner(gokart.TaskOnKart[str]):\n    def requires(self):\n        return [_DummyTask(param=str(i)) for i in range(10)]\n\n    def run(self):\n        self.dump('done')\n\n\nclass _LoadRequires(gokart.TaskOnKart[str]):\n    task: gokart.TaskInstanceParameter[gokart.TaskOnKart[str]] = gokart.TaskInstanceParameter()\n\n    def requires(self):\n        return self.task\n\n    def run(self):\n        s = self.load(self.task)\n        self.dump(s)\n\n\nclass RunTest(unittest.TestCase):\n    def setUp(self):\n        luigi.setup_logging.DaemonLogging._configured = False\n     
   luigi.setup_logging.InterfaceLogging._configured = False\n        luigi.configuration.LuigiConfigParser._instance = None\n        self.config_paths = copy(luigi.configuration.LuigiConfigParser._config_paths)\n        luigi.mock.MockFileSystem().clear()\n        os.environ.clear()\n\n    def tearDown(self):\n        luigi.configuration.LuigiConfigParser._config_paths = self.config_paths\n        os.environ.clear()\n        luigi.setup_logging.DaemonLogging._configured = False\n        luigi.setup_logging.InterfaceLogging._configured = False\n\n    def test_build(self):\n        text = 'test'\n        output = gokart.build(_DummyTask(param=text), reset_register=False)\n        self.assertEqual(output, text)\n\n    def test_build_parallel(self):\n        output = gokart.build(_ParallelRunner(), reset_register=False, workers=20)\n        self.assertEqual(output, 'done')\n\n    def test_read_config(self):\n        class _DummyTask(gokart.TaskOnKart[Any]):\n            task_namespace = 'test_read_config'\n            param = luigi.Parameter()\n\n            def run(self):\n                self.dump(self.param)\n\n        os.environ.setdefault('test_param', 'test')\n        config_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config', 'test_config.ini')\n        gokart.utils.add_config(config_file_path)\n        output = gokart.build(_DummyTask(), reset_register=False)\n        self.assertIsInstance(output, str)\n        self.assertEqual(output, 'test')\n\n    def test_build_dict_outputs(self):\n        param_dict = {\n            'out1': 'test1',\n            'out2': 'test2',\n        }\n        output = gokart.build(_DummyTaskTwoOutputs(param1=param_dict['out1'], param2=param_dict['out2']), reset_register=False)\n        assert_type(output, dict[str, str])\n        self.assertEqual(output, param_dict)\n\n    def test_failed_task(self):\n        with self.assertRaises(GokartBuildError):\n            gokart.build(_DummyFailedTask(), 
reset_register=False, log_level=logging.CRITICAL)\n\n    def test_load_requires(self):\n        text = 'test'\n        output = gokart.build(_LoadRequires(task=_DummyTask(param=text)), reset_register=False)\n        self.assertEqual(output, text)\n\n    def test_build_with_child_task_error(self):\n        class CheckException(Exception):\n            pass\n\n        class FailTask(gokart.TaskOnKart[Any]):\n            def run(self):\n                raise CheckException()\n\n        t = FailTask()\n        with self.assertRaises(GokartBuildError) as cm:\n            gokart.build(t, reset_register=False, log_level=logging.CRITICAL)\n        e = cm.exception\n        self.assertEqual(len(e.raised_exceptions), 1)\n        self.assertIsInstance(e.raised_exceptions[t.make_unique_id()][0], CheckException)\n\n\nclass LoggerConfigTest(unittest.TestCase):\n    def test_logger_config(self):\n        for level, enable_expected, disable_expected in (\n            (logging.INFO, logging.INFO, logging.DEBUG),\n            (logging.DEBUG, logging.DEBUG, logging.NOTSET),\n            (logging.CRITICAL, logging.CRITICAL, logging.ERROR),\n        ):\n            with self.subTest(level=level, enable_expected=enable_expected, disable_expected=disable_expected):\n                with LoggerConfig(level) as lc:\n                    self.assertTrue(lc.logger.isEnabledFor(enable_expected))\n                    self.assertTrue(not lc.logger.isEnabledFor(disable_expected))\n\n\nclass ProcessTaskInfoTest(unittest.TestCase):\n    def test_process_task_info(self):\n        task = _DummyTask(param='test')\n        for config in (\n            TaskDumpConfig(mode=TaskDumpMode.TREE, output_type=TaskDumpOutputType.PRINT),\n            TaskDumpConfig(mode=TaskDumpMode.TABLE, output_type=TaskDumpOutputType.PRINT),\n        ):\n            with LoggerConfig(level=logging.INFO):\n                from gokart.build import logger\n\n                log_stream = io.StringIO()\n                handler = 
logging.StreamHandler(log_stream)\n\n                handler.setLevel(logging.INFO)\n                logger.addHandler(handler)\n                process_task_info(task, config)\n                logger.removeHandler(handler)\n                handler.close()\n                self.assertIn(member=str(task.make_unique_id()), container=log_stream.getvalue())\n\n\nclass _FailThreeTimesAndSuccessTask(gokart.TaskOnKart[Any]):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.failed_counter = 0\n\n    def run(self):\n        if self.failed_counter < 3:\n            self.failed_counter += 1\n            raise TaskLockException()\n        self.dump('done')\n\n\nclass TestBuildHasLockedTaskException(unittest.TestCase):\n    def test_build_expo_backoff_when_luigi_failed_due_to_locked_task(self):\n        gokart.build(_FailThreeTimesAndSuccessTask(), reset_register=False)\n\n\nclass TestBuildFailedAndSchedulingFailed(unittest.TestCase):\n    def test_build_raises_exception_on_failed_and_scheduling_failed(self):\n        \"\"\"Test that build() raises GokartBuildError when FAILED_AND_SCHEDULING_FAILED occurs\"\"\"\n\n        # Create a mock result object with FAILED_AND_SCHEDULING_FAILED status\n        class MockResult:\n            def __init__(self):\n                self.status = luigi.LuigiStatusCode.FAILED_AND_SCHEDULING_FAILED\n                self.summary_text = 'Task failed and scheduling failed'\n\n        # Mock luigi.build to return FAILED_AND_SCHEDULING_FAILED status\n        with patch('luigi.build') as mock_luigi_build:\n            mock_luigi_build.return_value = MockResult()\n\n            # This should now raise GokartBuildError after the fix\n            with self.assertRaises(GokartBuildError):\n                gokart.build(_DummyTask(param='test'), reset_register=False, log_level=logging.CRITICAL)\n\n    def test_build_not_raises_exception_when_success_with_retry(self):\n        \"\"\"Test that build() does not 
raise GokartBuildError when task succeeds with retry\"\"\"\n\n        # Create a mock result object with SUCCESS_WITH_RETRY status\n        class MockResult:\n            def __init__(self):\n                self.status = luigi.LuigiStatusCode.SUCCESS_WITH_RETRY\n                self.summary_text = 'Task completed successfully after retries'\n\n        # Mock _build_task to return a test value directly\n        with patch('luigi.build') as mock_luigi_build:\n            mock_luigi_build.return_value = MockResult()\n\n            # Create a mock task that will be used by build()\n            mock_task = _DummyTask(param='test')\n\n            # This should not raise GokartBuildError\n            # The test output will be whatever the mock returns\n            gokart.build(mock_task, reset_register=False, return_value=False, log_level=logging.CRITICAL)\n\n    def test_build_raises_exception_on_scheduling_failed_only(self):\n        \"\"\"Test that build() raises GokartBuildError when SCHEDULING_FAILED occurs\"\"\"\n\n        # Create a mock result object with SCHEDULING_FAILED status\n        class MockResult:\n            def __init__(self):\n                self.status = luigi.LuigiStatusCode.SCHEDULING_FAILED\n                self.summary_text = 'Task scheduling failed'\n\n        # Mock luigi.build to return SCHEDULING_FAILED status\n        with patch('luigi.build') as mock_luigi_build:\n            mock_luigi_build.return_value = MockResult()\n\n            # This should raise GokartBuildError after the fix\n            with self.assertRaises(GokartBuildError):\n                gokart.build(_DummyTask(param='test'), reset_register=False, log_level=logging.CRITICAL)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/test_cache_unique_id.py",
    "content": "import os\nimport unittest\nfrom typing import Any\n\nimport luigi\nimport luigi.mock\n\nimport gokart\n\n\nclass _DummyTask(gokart.TaskOnKart[Any]):\n    def requires(self):\n        return _DummyTaskDep()\n\n    def run(self):\n        self.dump(self.load())\n\n\nclass _DummyTaskDep(gokart.TaskOnKart[str]):\n    param: luigi.Parameter = luigi.Parameter()\n\n    def run(self):\n        self.dump(self.param)\n\n\nclass CacheUniqueIDTest(unittest.TestCase):\n    def setUp(self):\n        luigi.configuration.LuigiConfigParser._instance = None\n        luigi.mock.MockFileSystem().clear()\n        os.environ.clear()\n\n    @staticmethod\n    def _set_param(cls, attr_name: str, param: luigi.Parameter) -> None:  # type: ignore\n        # Luigi 3.8.0+ uses __set_name__ to register _attribute_name on Parameter descriptors.\n        # When assigning after class creation (bypassing the metaclass), call it manually.\n        param.__set_name__(cls, attr_name)\n        setattr(cls, attr_name, param)\n\n    def test_cache_unique_id_true(self):\n        self._set_param(_DummyTaskDep, 'param', luigi.Parameter(default='original_param'))\n\n        output1 = gokart.build(_DummyTask(cache_unique_id=True), reset_register=False)\n\n        self._set_param(_DummyTaskDep, 'param', luigi.Parameter(default='updated_param'))\n        output2 = gokart.build(_DummyTask(cache_unique_id=True), reset_register=False)\n        self.assertEqual(output1, output2)\n\n    def test_cache_unique_id_false(self):\n        self._set_param(_DummyTaskDep, 'param', luigi.Parameter(default='original_param'))\n\n        output1 = gokart.build(_DummyTask(cache_unique_id=False), reset_register=False)\n\n        self._set_param(_DummyTaskDep, 'param', luigi.Parameter(default='updated_param'))\n        output2 = gokart.build(_DummyTask(cache_unique_id=False), reset_register=False)\n        self.assertNotEqual(output1, output2)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/test_config_params.py",
    "content": "import unittest\nfrom typing import Any\n\nimport luigi\nfrom luigi.cmdline_parser import CmdlineParser\n\nimport gokart\nfrom gokart.config_params import inherits_config_params\n\n\ndef in_parse(cmds, deferred_computation):\n    \"\"\"function copied from luigi: https://github.com/spotify/luigi/blob/e2228418eec60b68ca09a30c878ab26413846847/test/helpers.py\"\"\"\n    with CmdlineParser.global_instance(cmds) as cp:\n        deferred_computation(cp.get_task_obj())\n\n\nclass ConfigClass(luigi.Config):\n    param_a = luigi.Parameter(default='config a')\n    param_b = luigi.Parameter(default='config b')\n    param_c = luigi.Parameter(default='config c')\n\n\n@inherits_config_params(ConfigClass)\nclass Inherited(gokart.TaskOnKart[Any]):\n    param_a = luigi.Parameter()\n    param_b = luigi.Parameter(default='overrided')\n\n\n@inherits_config_params(ConfigClass, parameter_alias={'param_a': 'param_d'})\nclass Inherited2(gokart.TaskOnKart[Any]):\n    param_c = luigi.Parameter()\n    param_d = luigi.Parameter()\n\n\nclass ChildTask(Inherited):\n    pass\n\n\nclass ChildTaskWithNewParam(Inherited):\n    param_new = luigi.Parameter()\n\n\nclass ConfigClass2(luigi.Config):\n    param_a = luigi.Parameter(default='config a from config class 2')\n\n\n@inherits_config_params(ConfigClass2)\nclass ChildTaskWithNewConfig(Inherited):\n    pass\n\n\nclass TestInheritsConfigParam(unittest.TestCase):\n    def test_inherited_params(self):\n        # test fill values\n        in_parse(['Inherited'], lambda task: self.assertEqual(task.param_a, 'config a'))\n\n        # test overrided\n        in_parse(['Inherited'], lambda task: self.assertEqual(task.param_b, 'config b'))\n\n        # Command line argument takes precedence over config param\n        in_parse(['Inherited', '--param-a', 'command line arg'], lambda task: self.assertEqual(task.param_a, 'command line arg'))\n\n        # Parameters which is not a member of the task will not be set\n        with 
self.assertRaises(AttributeError):\n            in_parse(['Inherited'], lambda task: task.param_c)\n\n        # test parameter name alias\n        in_parse(['Inherited2'], lambda task: self.assertEqual(task.param_c, 'config c'))\n        in_parse(['Inherited2'], lambda task: self.assertEqual(task.param_d, 'config a'))\n\n    def test_child_task(self):\n        in_parse(['ChildTask'], lambda task: self.assertEqual(task.param_a, 'config a'))\n        in_parse(['ChildTask'], lambda task: self.assertEqual(task.param_b, 'config b'))\n        in_parse(['ChildTask', '--param-a', 'command line arg'], lambda task: self.assertEqual(task.param_a, 'command line arg'))\n        with self.assertRaises(AttributeError):\n            in_parse(['ChildTask'], lambda task: task.param_c)\n\n    def test_child_override(self):\n        in_parse(['ChildTaskWithNewConfig'], lambda task: self.assertEqual(task.param_a, 'config a from config class 2'))\n        in_parse(['ChildTaskWithNewConfig'], lambda task: self.assertEqual(task.param_b, 'config b'))\n"
  },
  {
    "path": "test/test_explicit_bool_parameter.py",
    "content": "import unittest\nfrom typing import Any\n\nimport luigi\nimport luigi.mock\nfrom luigi.cmdline_parser import CmdlineParser\n\nimport gokart\n\n\ndef in_parse(cmds, deferred_computation):\n    with CmdlineParser.global_instance(cmds) as cp:\n        deferred_computation(cp.get_task_obj())\n\n\nclass WithDefaultTrue(gokart.TaskOnKart[Any]):\n    param = gokart.ExplicitBoolParameter(default=True)\n\n\nclass WithDefaultFalse(gokart.TaskOnKart[Any]):\n    param = gokart.ExplicitBoolParameter(default=False)\n\n\nclass ExplicitParsing(gokart.TaskOnKart[Any]):\n    param = gokart.ExplicitBoolParameter()\n\n    def run(self):\n        ExplicitParsing._param = self.param  # type: ignore\n\n\nclass TestExplicitBoolParameter(unittest.TestCase):\n    def test_bool_default(self):\n        self.assertTrue(WithDefaultTrue().param)\n        self.assertFalse(WithDefaultFalse().param)\n\n    def test_parse_param(self):\n        in_parse(['ExplicitParsing', '--param', 'true'], lambda task: self.assertTrue(task.param))\n        in_parse(['ExplicitParsing', '--param', 'false'], lambda task: self.assertFalse(task.param))\n        in_parse(['ExplicitParsing', '--param', 'True'], lambda task: self.assertTrue(task.param))\n        in_parse(['ExplicitParsing', '--param', 'False'], lambda task: self.assertFalse(task.param))\n\n    def test_missing_parameter(self):\n        with self.assertRaises(luigi.parameter.MissingParameterException):\n            in_parse(['ExplicitParsing'], lambda: True)\n\n    def test_value_error(self):\n        with self.assertRaises(ValueError):\n            in_parse(['ExplicitParsing', '--param', 'Foo'], lambda: True)\n\n    def test_expected_one_argment_error(self):\n        # argparse throw \"expected one argument\" error\n        with self.assertRaises(SystemExit):\n            in_parse(['ExplicitParsing', '--param'], lambda: True)\n"
  },
  {
    "path": "test/test_gcs_config.py",
    "content": "import os\nimport unittest\nfrom unittest.mock import MagicMock, patch\n\nfrom gokart.gcs_config import GCSConfig\n\n\nclass TestGCSConfig(unittest.TestCase):\n    def test_get_gcs_client_without_gcs_credential_name(self):\n        mock = MagicMock()\n        os.environ['env_name'] = ''\n        with patch('luigi.contrib.gcs.GCSClient', mock):\n            GCSConfig(gcs_credential_name='env_name')._get_gcs_client()\n            self.assertEqual(dict(oauth_credentials=None), mock.call_args[1])\n\n    def test_get_gcs_client_with_file_path(self):\n        mock = MagicMock()\n        file_path = 'test.json'\n        os.environ['env_name'] = file_path\n        with patch('luigi.contrib.gcs.GCSClient'):\n            with patch('google.oauth2.service_account.Credentials.from_service_account_file', mock):\n                with patch('os.path.isfile', return_value=True):\n                    GCSConfig(gcs_credential_name='env_name')._get_gcs_client()\n                    self.assertEqual(file_path, mock.call_args[0][0])\n\n    def test_get_gcs_client_with_json(self):\n        mock = MagicMock()\n        json_str = '{\"test\": 1}'\n        os.environ['env_name'] = json_str\n        with patch('luigi.contrib.gcs.GCSClient'):\n            with patch('google.oauth2.service_account.Credentials.from_service_account_info', mock):\n                GCSConfig(gcs_credential_name='env_name')._get_gcs_client()\n                self.assertEqual(dict(test=1), mock.call_args[0][0])\n"
  },
  {
    "path": "test/test_gcs_obj_metadata_client.py",
    "content": "from __future__ import annotations\n\nimport datetime\nimport unittest\nfrom typing import Any\nfrom unittest.mock import MagicMock, patch\n\nimport gokart\nfrom gokart.gcs_obj_metadata_client import GCSObjectMetadataClient\nfrom gokart.required_task_output import RequiredTaskOutput\nfrom gokart.target import TargetOnKart\n\n\nclass _DummyTaskOnKart(gokart.TaskOnKart[str]):\n    task_namespace = __name__\n\n    def run(self):\n        self.dump('Dummy TaskOnKart')\n\n\nclass TestGCSObjectMetadataClient(unittest.TestCase):\n    def setUp(self):\n        self.task_params: dict[str, str] = {\n            'param1': 'a' * 1000,\n            'param2': str(1000),\n            'param3': str({'key1': 'value1', 'key2': True, 'key3': 2}),\n            'param4': str([1, 2, 3, 4, 5]),\n            'param5': str(datetime.datetime(year=2025, month=1, day=2, hour=3, minute=4, second=5)),\n            'param6': '',\n        }\n        self.custom_labels: dict[str, Any] = {\n            'created_at': datetime.datetime(year=2025, month=1, day=2, hour=3, minute=4, second=5),\n            'created_by': 'hoge fuga',\n            'empty': True,\n            'try_num': 3,\n        }\n\n        self.task_params_with_conflicts = {\n            'empty': 'False',\n            'created_by': 'fuga hoge',\n            'param1': 'a' * 10,\n        }\n\n    def test_normalize_labels_not_empty(self):\n        got = GCSObjectMetadataClient._normalize_labels(None)\n        self.assertEqual(got, {})\n\n    def test_normalize_labels_has_value(self):\n        got = GCSObjectMetadataClient._normalize_labels(self.task_params)\n\n        self.assertIsInstance(got, dict)\n        self.assertIsInstance(got, dict)\n        self.assertIn('param1', got)\n        self.assertIn('param2', got)\n        self.assertIn('param3', got)\n        self.assertIn('param4', got)\n        self.assertIn('param5', got)\n        self.assertIn('param6', got)\n\n    def 
test_get_patched_obj_metadata_only_task_params(self):\n        got = GCSObjectMetadataClient._get_patched_obj_metadata({}, task_params=self.task_params, custom_labels=None)\n\n        self.assertIsInstance(got, dict)\n        self.assertIn('param1', got)\n        self.assertIn('param2', got)\n        self.assertIn('param3', got)\n        self.assertIn('param4', got)\n        self.assertIn('param5', got)\n        self.assertNotIn('param6', got)\n\n    def test_get_patched_obj_metadata_only_custom_labels(self):\n        got = GCSObjectMetadataClient._get_patched_obj_metadata({}, task_params=None, custom_labels=self.custom_labels)\n\n        self.assertIsInstance(got, dict)\n        self.assertIn('created_at', got)\n        self.assertIn('created_by', got)\n        self.assertIn('empty', got)\n        self.assertIn('try_num', got)\n\n    def test_get_patched_obj_metadata_with_both_task_params_and_custom_labels(self):\n        got = GCSObjectMetadataClient._get_patched_obj_metadata({}, task_params=self.task_params, custom_labels=self.custom_labels)\n\n        self.assertIsInstance(got, dict)\n        self.assertIn('param1', got)\n        self.assertIn('param2', got)\n        self.assertIn('param3', got)\n        self.assertIn('param4', got)\n        self.assertIn('param5', got)\n        self.assertNotIn('param6', got)\n        self.assertIn('created_at', got)\n        self.assertIn('created_by', got)\n        self.assertIn('empty', got)\n        self.assertIn('try_num', got)\n\n    def test_get_patched_obj_metadata_with_exceeded_size_metadata(self):\n        size_exceeded_task_params = {\n            'param1': 'a' * 5000,\n            'param2': 'b' * 5000,\n        }\n        want = {\n            'param1': 'a' * 5000,\n        }\n        got = GCSObjectMetadataClient._get_patched_obj_metadata({}, task_params=size_exceeded_task_params)\n        self.assertEqual(got, want)\n\n    def test_get_patched_obj_metadata_with_conflicts(self):\n        got = 
GCSObjectMetadataClient._get_patched_obj_metadata({}, task_params=self.task_params_with_conflicts, custom_labels=self.custom_labels)\n        self.assertIsInstance(got, dict)\n        self.assertIn('created_at', got)\n        self.assertIn('created_by', got)\n        self.assertIn('empty', got)\n        self.assertIn('try_num', got)\n        self.assertEqual(got['empty'], 'True')\n        self.assertEqual(got['created_by'], 'hoge fuga')\n        self.assertEqual(got['param1'], 'a' * 10)\n\n    def test_get_patched_obj_metadata_with_required_task_outputs(self):\n        got = GCSObjectMetadataClient._get_patched_obj_metadata(\n            {},\n            required_task_outputs=[\n                RequiredTaskOutput(task_name='task1', output_path='path/to/output1'),\n            ],\n        )\n\n        self.assertIsInstance(got, dict)\n        self.assertIn('__required_task_outputs', got)\n        self.assertEqual(got['__required_task_outputs'], '[{\"__gokart_task_name\": \"task1\", \"__gokart_output_path\": \"path/to/output1\"}]')\n\n    def test_get_patched_obj_metadata_with_nested_required_task_outputs(self):\n        got = GCSObjectMetadataClient._get_patched_obj_metadata(\n            {},\n            required_task_outputs={\n                'nested_task': {'nest': RequiredTaskOutput(task_name='task1', output_path='path/to/output1')},\n            },\n        )\n\n        self.assertIsInstance(got, dict)\n        self.assertIn('__required_task_outputs', got)\n        self.assertEqual(\n            got['__required_task_outputs'], '{\"nested_task\": {\"nest\": {\"__gokart_task_name\": \"task1\", \"__gokart_output_path\": \"path/to/output1\"}}}'\n        )\n\n    def test_adjust_gcs_metadata_limit_size_runtime_error(self):\n        large_labels = {}\n        for i in range(100):\n            large_labels[f'key_{i}'] = 'x' * 1000\n\n        GCSObjectMetadataClient._adjust_gcs_metadata_limit_size(large_labels)\n\n\nclass TestGokartTask(unittest.TestCase):\n    
@patch.object(_DummyTaskOnKart, '_get_output_target')\n    def test_mock_target_on_kart(self, mock_get_output_target):\n        mock_target = MagicMock(spec=TargetOnKart)\n        mock_get_output_target.return_value = mock_target\n\n        task = _DummyTaskOnKart()\n        task.dump({'key': 'value'}, mock_target)\n\n        mock_target.dump.assert_called_once_with(\n            {'key': 'value'}, lock_at_dump=task._lock_at_dump, task_params={}, custom_labels=None, required_task_outputs=[]\n        )\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/test_info.py",
    "content": "import unittest\nfrom unittest.mock import patch\n\nimport luigi\nimport luigi.mock\nfrom luigi.mock import MockFileSystem, MockTarget\n\nimport gokart\nimport gokart.info\nfrom test.tree.test_task_info import _DoubleLoadSubTask, _SubTask, _Task\n\n\nclass TestInfo(unittest.TestCase):\n    def setUp(self) -> None:\n        MockFileSystem().clear()\n        luigi.setup_logging.DaemonLogging._configured = False\n        luigi.setup_logging.InterfaceLogging._configured = False\n\n    def tearDown(self) -> None:\n        luigi.setup_logging.DaemonLogging._configured = False\n        luigi.setup_logging.InterfaceLogging._configured = False\n\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: MockTarget(path, **kwargs))\n    def test_make_tree_info_pending(self):\n        task = _Task(param=1, sub=_SubTask(param=2))\n\n        # check before running\n        tree = gokart.info.make_tree_info(task)\n        expected = r\"\"\"\n└─-\\(PENDING\\) _Task\\[[a-z0-9]*\\]\n   └─-\\(PENDING\\) _SubTask\\[[a-z0-9]*\\]$\"\"\"\n        self.assertRegex(tree, expected)\n\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: MockTarget(path, **kwargs))\n    def test_make_tree_info_complete(self):\n        task = _Task(param=1, sub=_SubTask(param=2))\n\n        # check after sub task runs\n        gokart.build(task, reset_register=False)\n        tree = gokart.info.make_tree_info(task)\n        expected = r\"\"\"\n└─-\\(COMPLETE\\) _Task\\[[a-z0-9]*\\]\n   └─-\\(COMPLETE\\) _SubTask\\[[a-z0-9]*\\]$\"\"\"\n        self.assertRegex(tree, expected)\n\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: MockTarget(path, **kwargs))\n    def test_make_tree_info_abbreviation(self):\n        task = _DoubleLoadSubTask(\n            sub1=_Task(param=1, sub=_SubTask(param=2)),\n            sub2=_Task(param=1, sub=_SubTask(param=2)),\n        )\n\n        # check after sub task runs\n        gokart.build(task, reset_register=False)\n        tree = 
gokart.info.make_tree_info(task)\n        expected = r\"\"\"\n└─-\\(COMPLETE\\) _DoubleLoadSubTask\\[[a-z0-9]*\\]\n   \\|--\\(COMPLETE\\) _Task\\[[a-z0-9]*\\]\n   \\|  └─-\\(COMPLETE\\) _SubTask\\[[a-z0-9]*\\]\n   └─-\\(COMPLETE\\) _Task\\[[a-z0-9]*\\]\n      └─- \\.\\.\\.$\"\"\"\n        self.assertRegex(tree, expected)\n\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: MockTarget(path, **kwargs))\n    def test_make_tree_info_not_compress(self):\n        task = _DoubleLoadSubTask(\n            sub1=_Task(param=1, sub=_SubTask(param=2)),\n            sub2=_Task(param=1, sub=_SubTask(param=2)),\n        )\n\n        # check after sub task runs\n        gokart.build(task, reset_register=False)\n        tree = gokart.info.make_tree_info(task, abbr=False)\n        expected = r\"\"\"\n└─-\\(COMPLETE\\) _DoubleLoadSubTask\\[[a-z0-9]*\\]\n   \\|--\\(COMPLETE\\) _Task\\[[a-z0-9]*\\]\n   \\|  └─-\\(COMPLETE\\) _SubTask\\[[a-z0-9]*\\]\n   └─-\\(COMPLETE\\) _Task\\[[a-z0-9]*\\]\n      └─-\\(COMPLETE\\) _SubTask\\[[a-z0-9]*\\]$\"\"\"\n        self.assertRegex(tree, expected)\n\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: MockTarget(path, **kwargs))\n    def test_make_tree_info_not_compress_ignore_task(self):\n        task = _DoubleLoadSubTask(\n            sub1=_Task(param=1, sub=_SubTask(param=2)),\n            sub2=_Task(param=1, sub=_SubTask(param=2)),\n        )\n\n        # check after sub task runs\n        gokart.build(task, reset_register=False)\n        tree = gokart.info.make_tree_info(task, abbr=False, ignore_task_names=['_Task'])\n        expected = r\"\"\"\n└─-\\(COMPLETE\\) _DoubleLoadSubTask\\[[a-z0-9]*\\]$\"\"\"\n        self.assertRegex(tree, expected)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/test_large_data_fram_processor.py",
    "content": "import os\nimport shutil\nimport unittest\n\nimport numpy as np\nimport pandas as pd\n\nfrom gokart.target import LargeDataFrameProcessor\nfrom test.util import _get_temporary_directory\n\n\nclass LargeDataFrameProcessorTest(unittest.TestCase):\n    def setUp(self):\n        self.temporary_directory = _get_temporary_directory()\n\n    def tearDown(self):\n        shutil.rmtree(self.temporary_directory, ignore_errors=True)\n\n    def test_save_and_load(self):\n        file_path = os.path.join(self.temporary_directory, 'test.zip')\n        df = pd.DataFrame(dict(data=np.random.uniform(0, 1, size=int(1e6))))\n        processor = LargeDataFrameProcessor(max_byte=int(1e6))\n        processor.save(df, file_path)\n        loaded = processor.load(file_path)\n\n        pd.testing.assert_frame_equal(loaded, df, check_like=True)\n\n    def test_save_and_load_empty(self):\n        file_path = os.path.join(self.temporary_directory, 'test_with_empty.zip')\n        df = pd.DataFrame()\n        processor = LargeDataFrameProcessor(max_byte=int(1e6))\n        processor.save(df, file_path)\n        loaded = processor.load(file_path)\n\n        pd.testing.assert_frame_equal(loaded, df, check_like=True)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/test_list_task_instance_parameter.py",
    "content": "import unittest\nfrom typing import Any\n\nimport luigi\n\nimport gokart\nfrom gokart import TaskOnKart\n\n\nclass _DummySubTask(TaskOnKart[Any]):\n    task_namespace = __name__\n    pass\n\n\nclass _DummyTask(TaskOnKart[Any]):\n    task_namespace = __name__\n    param: luigi.IntParameter = luigi.IntParameter()\n    task: gokart.TaskInstanceParameter[_DummySubTask] = gokart.TaskInstanceParameter(default=_DummySubTask())\n\n\nclass ListTaskInstanceParameterTest(unittest.TestCase):\n    def setUp(self):\n        _DummyTask.clear_instance_cache()\n\n    def test_serialize_and_parse(self):\n        original = [_DummyTask(param=3), _DummyTask(param=3)]\n        s = gokart.ListTaskInstanceParameter().serialize(original)\n        parsed = gokart.ListTaskInstanceParameter().parse(s)\n        self.assertEqual(parsed[0].task_id, original[0].task_id)\n        self.assertEqual(parsed[1].task_id, original[1].task_id)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/test_mypy.py",
    "content": "import tempfile\nimport unittest\n\nfrom mypy import api\n\nfrom test.config import PYPROJECT_TOML\n\n\nclass TestMyMypyPlugin(unittest.TestCase):\n    def test_plugin_no_issue(self):\n        test_code = \"\"\"\nimport luigi\nfrom luigi import Parameter\nimport gokart\nimport datetime\n\n\nclass MyTask(gokart.TaskOnKart):\n    foo: int = luigi.IntParameter() # type: ignore\n    bar: str = luigi.Parameter() # type: ignore\n    baz: bool = gokart.ExplicitBoolParameter()\n    qux: str = Parameter()\n    # https://github.com/m3dev/gokart/issues/395\n    datetime: datetime.datetime = luigi.DateMinuteParameter(interval=10, default=datetime.datetime(2021, 1, 1))\n\n\n\n# TaskOnKart parameters:\n#   - `complete_check_at_run`\nMyTask(foo=1, bar='bar', baz=False, qux='qux', complete_check_at_run=False)\n\"\"\"\n\n        with tempfile.NamedTemporaryFile(suffix='.py') as test_file:\n            test_file.write(test_code.encode('utf-8'))\n            test_file.flush()\n            stdout, stderr, exitcode = api.run(['--no-incremental', '--cache-dir=/dev/null', '--config-file', str(PYPROJECT_TOML), test_file.name])\n            self.assertEqual(exitcode, 0, f'mypy plugin error occurred:\\nstdout: {stdout}\\nstderr: {stderr}')\n            self.assertIn('Success: no issues found', stdout)\n\n    def test_plugin_invalid_arg(self):\n        test_code = \"\"\"\nimport luigi\nimport gokart\n\n\nclass MyTask(gokart.TaskOnKart):\n    foo: int = luigi.IntParameter() # type: ignore\n    bar: str = luigi.Parameter() # type: ignore\n    baz: bool = gokart.ExplicitBoolParameter()\n\n# issue: foo is int\n# not issue: bar is missing, because it can be set by config file.\n# TaskOnKart parameters:\n#   - `complete_check_at_run`\nMyTask(foo='1', baz='not bool', complete_check_at_run='not bool')\n        \"\"\"\n\n        with tempfile.NamedTemporaryFile(suffix='.py') as test_file:\n            test_file.write(test_code.encode('utf-8'))\n            test_file.flush()\n          
  stdout, stderr, exitcode = api.run(['--no-incremental', '--cache-dir=/dev/null', '--config-file', str(PYPROJECT_TOML), test_file.name])\n            self.assertEqual(exitcode, 1, f'mypy plugin error not occurred:\\nstdout: {stdout}\\nstderr: {stderr}')\n            self.assertIn('error: Argument \"foo\" to \"MyTask\" has incompatible type \"str\"; expected \"int\"  [arg-type]', stdout)\n            self.assertIn('error: Argument \"baz\" to \"MyTask\" has incompatible type \"str\"; expected \"bool\"  [arg-type]', stdout)\n            self.assertIn('error: Argument \"complete_check_at_run\" to \"MyTask\" has incompatible type \"str\"; expected \"bool\"  [arg-type]', stdout)\n            self.assertIn('Found 3 errors in 1 file (checked 1 source file)', stdout)\n"
  },
  {
    "path": "test/test_pandas_type_check_framework.py",
    "content": "from __future__ import annotations\n\nimport logging\nimport unittest\nfrom logging import getLogger\nfrom typing import Any\nfrom unittest.mock import patch\n\nimport luigi\nimport pandas as pd\nfrom luigi.mock import MockFileSystem, MockTarget\n\nimport gokart\nfrom gokart.build import GokartBuildError\nfrom gokart.pandas_type_config import PandasTypeConfig\n\nlogger = getLogger(__name__)\n\n\nclass TestPandasTypeConfig(PandasTypeConfig):\n    task_namespace = 'test_pandas_type_check_framework'\n\n    @classmethod\n    def type_dict(cls) -> dict[str, Any]:\n        return {'system_cd': int}\n\n\nclass _DummyFailTask(gokart.TaskOnKart[pd.DataFrame]):\n    task_namespace = 'test_pandas_type_check_framework'\n    rerun: luigi.BoolParameter = luigi.BoolParameter(default=True, significant=False)\n\n    def output(self):\n        return self.make_target('dummy.pkl')\n\n    def run(self):\n        df = pd.DataFrame(dict(system_cd=['1']))\n        self.dump(df)\n\n\nclass _DummyFailWithNoneTask(gokart.TaskOnKart[pd.DataFrame]):\n    task_namespace = 'test_pandas_type_check_framework'\n    rerun: luigi.BoolParameter = luigi.BoolParameter(default=True, significant=False)\n\n    def output(self):\n        return self.make_target('dummy.pkl')\n\n    def run(self):\n        df = pd.DataFrame(dict(system_cd=[1, None]))\n        self.dump(df)\n\n\nclass _DummySuccessTask(gokart.TaskOnKart[pd.DataFrame]):\n    task_namespace = 'test_pandas_type_check_framework'\n    rerun: luigi.BoolParameter = luigi.BoolParameter(default=True, significant=False)\n\n    def output(self):\n        return self.make_target('dummy.pkl')\n\n    def run(self):\n        df = pd.DataFrame(dict(system_cd=[1]))\n        self.dump(df)\n\n\nclass TestPandasTypeCheckFramework(unittest.TestCase):\n    def setUp(self) -> None:\n        luigi.setup_logging.DaemonLogging._configured = False\n        luigi.setup_logging.InterfaceLogging._configured = False\n        MockFileSystem().clear()\n       
 # same way as luigi https://github.com/spotify/luigi/blob/fe7ecf4acf7cf4c084bd0f32162c8e0721567630/test/helpers.py#L175\n        self._stashed_reg = luigi.task_register.Register._get_reg()\n\n    def tearDown(self) -> None:\n        luigi.setup_logging.DaemonLogging._configured = False\n        luigi.setup_logging.InterfaceLogging._configured = False\n        luigi.task_register.Register._set_reg(self._stashed_reg)\n\n    @patch('sys.argv', new=['main', 'test_pandas_type_check_framework._DummyFailTask', '--log-level=CRITICAL', '--local-scheduler', '--no-lock'])\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: MockTarget(path, **kwargs))\n    def test_fail_with_gokart_run(self):\n        with self.assertRaises(SystemExit) as exit_code:\n            gokart.run()\n        self.assertNotEqual(exit_code.exception.code, 0)  # raise Error\n\n    def test_fail(self):\n        with self.assertRaises(GokartBuildError):\n            gokart.build(_DummyFailTask(), log_level=logging.CRITICAL)\n\n    def test_fail_with_None(self):\n        with self.assertRaises(GokartBuildError):\n            gokart.build(_DummyFailWithNoneTask(), log_level=logging.CRITICAL)\n\n    def test_success(self):\n        gokart.build(_DummySuccessTask())\n        # no error\n"
  },
  {
    "path": "test/test_pandas_type_config.py",
    "content": "from __future__ import annotations\n\nfrom datetime import date, datetime\nfrom typing import Any\nfrom unittest import TestCase\n\nimport numpy as np\nimport pandas as pd\n\nfrom gokart import PandasTypeConfig\nfrom gokart.pandas_type_config import PandasTypeError\n\n\nclass _DummyPandasTypeConfig(PandasTypeConfig):\n    @classmethod\n    def type_dict(cls) -> dict[str, Any]:\n        return {'int_column': int, 'datetime_column': datetime, 'array_column': np.ndarray}\n\n\nclass TestPandasTypeConfig(TestCase):\n    def test_int_fail(self):\n        df = pd.DataFrame(dict(int_column=['1']))\n        with self.assertRaises(PandasTypeError):\n            _DummyPandasTypeConfig().check(df)\n\n    def test_int_success(self):\n        df = pd.DataFrame(dict(int_column=[1]))\n        _DummyPandasTypeConfig().check(df)\n\n    def test_datetime_fail(self):\n        df = pd.DataFrame(dict(datetime_column=[date(2019, 1, 12)]))\n        with self.assertRaises(PandasTypeError):\n            _DummyPandasTypeConfig().check(df)\n\n    def test_datetime_success(self):\n        df = pd.DataFrame(dict(datetime_column=[datetime(2019, 1, 12, 0, 0, 0)]))\n        _DummyPandasTypeConfig().check(df)\n\n    def test_array_fail(self):\n        df = pd.DataFrame(dict(array_column=[[1, 2]]))\n        with self.assertRaises(PandasTypeError):\n            _DummyPandasTypeConfig().check(df)\n\n    def test_array_success(self):\n        df = pd.DataFrame(dict(array_column=[np.array([1, 2])]))\n        _DummyPandasTypeConfig().check(df)\n"
  },
  {
    "path": "test/test_restore_task_by_id.py",
    "content": "import unittest\nfrom typing import Any\nfrom unittest.mock import patch\n\nimport luigi\nimport luigi.mock\n\nimport gokart\n\n\nclass _SubDummyTask(gokart.TaskOnKart[str]):\n    task_namespace = __name__\n    param: luigi.IntParameter = luigi.IntParameter()\n\n    def run(self):\n        self.dump('test')\n\n\nclass _DummyTask(gokart.TaskOnKart[str]):\n    task_namespace = __name__\n    sub_task: gokart.TaskInstanceParameter[gokart.TaskOnKart[Any]] = gokart.TaskInstanceParameter()\n\n    def output(self):\n        return self.make_target('test.txt')\n\n    def run(self):\n        self.dump('test')\n\n\nclass RestoreTaskByIDTest(unittest.TestCase):\n    def setUp(self) -> None:\n        luigi.mock.MockFileSystem().clear()\n\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: luigi.mock.MockTarget(path, **kwargs))\n    def test(self):\n        task = _DummyTask(sub_task=_SubDummyTask(param=10))\n        luigi.build([task], local_scheduler=True, log_level='CRITICAL')\n\n        unique_id = task.make_unique_id()\n        restored = _DummyTask.restore(unique_id)\n        self.assertTrue(task.make_unique_id(), restored.make_unique_id())\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/test_run.py",
    "content": "import os\nimport unittest\nfrom typing import Any\nfrom unittest.mock import MagicMock, patch\n\nimport luigi\nimport luigi.mock\n\nimport gokart\nfrom gokart.run import _try_to_send_event_summary_to_slack\n\n\nclass _DummyTask(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n    param: luigi.StrParameter = luigi.StrParameter()\n\n\nclass RunTest(unittest.TestCase):\n    def setUp(self):\n        luigi.configuration.LuigiConfigParser._instance = None\n        luigi.mock.MockFileSystem().clear()\n        os.environ.clear()\n\n    @patch('sys.argv', new=['main', f'{__name__}._DummyTask', '--param', 'test', '--log-level=CRITICAL', '--local-scheduler'])\n    def test_run(self):\n        config_file_path = os.path.join(os.path.dirname(__name__), 'config', 'test_config.ini')\n        luigi.configuration.LuigiConfigParser.add_config_path(config_file_path)\n        os.environ.setdefault('test_param', 'test')\n        with self.assertRaises(SystemExit) as exit_code:\n            gokart.run()\n        self.assertEqual(exit_code.exception.code, 0)\n\n    @patch('sys.argv', new=['main', f'{__name__}._DummyTask', '--log-level=CRITICAL', '--local-scheduler'])\n    def test_run_with_undefined_environ(self):\n        config_file_path = os.path.join(os.path.dirname(__name__), 'config', 'test_config.ini')\n        luigi.configuration.LuigiConfigParser.add_config_path(config_file_path)\n        with self.assertRaises(luigi.parameter.MissingParameterException):\n            gokart.run()\n\n    @patch(\n        'sys.argv',\n        new=[\n            'main',\n            '--tree-info-mode=simple',\n            '--tree-info-output-path=tree.txt',\n            f'{__name__}._DummyTask',\n            '--param',\n            'test',\n            '--log-level=CRITICAL',\n            '--local-scheduler',\n        ],\n    )\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: luigi.mock.MockTarget(path, **kwargs))\n    def test_run_tree_info(self):\n        
config_file_path = os.path.join(os.path.dirname(__name__), 'config', 'test_config.ini')\n        luigi.configuration.LuigiConfigParser.add_config_path(config_file_path)\n        os.environ.setdefault('test_param', 'test')\n        tree_info = gokart.tree_info(mode='simple', output_path='tree.txt')\n        with self.assertRaises(SystemExit):\n            gokart.run()\n        self.assertTrue(gokart.make_tree_info(_DummyTask(param='test')), tree_info.output().load())\n\n    @patch('gokart.make_tree_info')\n    def test_try_to_send_event_summary_to_slack(self, make_tree_info_mock: MagicMock) -> None:\n        event_aggregator_mock = MagicMock()\n        event_aggregator_mock.get_summury.return_value = f'{__name__}._DummyTask'\n        event_aggregator_mock.get_event_list.return_value = f'{__name__}._DummyTask:[]'\n        make_tree_info_mock.return_value = 'tree'\n\n        def get_content(content: str, **kwargs: Any) -> None:\n            self.output = content\n\n        slack_api_mock = MagicMock()\n        slack_api_mock.send_snippet.side_effect = get_content\n\n        cmdline_args = [f'{__name__}._DummyTask', '--param', 'test']\n        with patch('gokart.slack.SlackConfig.send_tree_info', True):\n            _try_to_send_event_summary_to_slack(slack_api_mock, event_aggregator_mock, cmdline_args)\n        expects = os.linesep.join(['===== Event List ====', event_aggregator_mock.get_event_list(), os.linesep, '==== Tree Info ====', 'tree'])\n\n        results = self.output\n        self.assertEqual(expects, results)\n\n        cmdline_args = [f'{__name__}._DummyTask', '--param', 'test']\n        with patch('gokart.slack.SlackConfig.send_tree_info', False):\n            _try_to_send_event_summary_to_slack(slack_api_mock, event_aggregator_mock, cmdline_args)\n        expects = os.linesep.join(\n            [\n                '===== Event List ====',\n                event_aggregator_mock.get_event_list(),\n                os.linesep,\n                '==== Tree Info 
====',\n                'Please add SlackConfig.send_tree_info to include tree-info',\n            ]\n        )\n\n        results = self.output\n        self.assertEqual(expects, results)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/test_s3_config.py",
    "content": "import unittest\n\nfrom gokart.s3_config import S3Config\n\n\nclass TestS3Config(unittest.TestCase):\n    def test_get_same_s3_client(self):\n        client_a = S3Config().get_s3_client()\n        client_b = S3Config().get_s3_client()\n\n        self.assertEqual(client_a, client_b)\n"
  },
  {
    "path": "test/test_s3_zip_client.py",
    "content": "import os\nimport shutil\nimport unittest\n\nimport boto3\nfrom moto import mock_aws\n\nfrom gokart.s3_zip_client import S3ZipClient\nfrom test.util import _get_temporary_directory\n\n\nclass TestS3ZipClient(unittest.TestCase):\n    def setUp(self):\n        self.temporary_directory = _get_temporary_directory()\n\n    def tearDown(self):\n        shutil.rmtree(self.temporary_directory, ignore_errors=True)\n\n        # remove temporary zip archive if exists.\n        if os.path.exists(f'{self.temporary_directory}.zip'):\n            os.remove(f'{self.temporary_directory}.zip')\n\n    @mock_aws\n    def test_make_archive(self):\n        conn = boto3.resource('s3', region_name='us-east-1')\n        conn.create_bucket(Bucket='test')\n\n        file_path = os.path.join('s3://test/', 'test.zip')\n        temporary_directory = self.temporary_directory\n\n        zip_client = S3ZipClient(file_path=file_path, temporary_directory=temporary_directory)\n        # raise error if temporary directory does not exist.\n        with self.assertRaises(FileNotFoundError):\n            zip_client.make_archive()\n\n        # run without error because temporary directory exists.\n        os.makedirs(temporary_directory, exist_ok=True)\n        zip_client.make_archive()\n\n    @mock_aws\n    def test_unpack_archive(self):\n        conn = boto3.resource('s3', region_name='us-east-1')\n        conn.create_bucket(Bucket='test')\n\n        file_path = os.path.join('s3://test/', 'test.zip')\n        in_temporary_directory = os.path.join(self.temporary_directory, 'in', 'dummy')\n        out_temporary_directory = os.path.join(self.temporary_directory, 'out', 'dummy')\n\n        # make dummy zip file.\n        os.makedirs(in_temporary_directory, exist_ok=True)\n        in_zip_client = S3ZipClient(file_path=file_path, temporary_directory=in_temporary_directory)\n        in_zip_client.make_archive()\n\n        # load dummy zip file.\n        out_zip_client = 
S3ZipClient(file_path=file_path, temporary_directory=out_temporary_directory)\n        self.assertFalse(os.path.exists(out_temporary_directory))\n        out_zip_client.unpack_archive()\n"
  },
  {
    "path": "test/test_serializable_parameter.py",
    "content": "import json\nimport tempfile\nfrom dataclasses import asdict, dataclass\nfrom typing import Any\n\nimport luigi\nimport pytest\nfrom luigi.cmdline_parser import CmdlineParser\nfrom mypy import api\n\nfrom gokart import SerializableParameter, TaskOnKart\nfrom test.config import PYPROJECT_TOML\n\n\n@dataclass(frozen=True)\nclass Config:\n    foo: int\n    bar: str\n\n    def gokart_serialize(self) -> str:\n        # dict is ordered in Python 3.7+\n        return json.dumps(asdict(self))\n\n    @classmethod\n    def gokart_deserialize(cls, s: str) -> 'Config':\n        return cls(**json.loads(s))\n\n\nclass SerializableParameterWithOutDefault(TaskOnKart[Any]):\n    task_namespace = __name__\n    config: SerializableParameter[Config] = SerializableParameter(object_type=Config)\n\n    def run(self):\n        self.dump(self.config)\n\n\nclass SerializableParameterWithDefault(TaskOnKart[Any]):\n    task_namespace = __name__\n    config: SerializableParameter[Config] = SerializableParameter(object_type=Config, default=Config(foo=1, bar='bar'))\n\n    def run(self):\n        self.dump(self.config)\n\n\nclass TestSerializableParameter:\n    def test_default(self):\n        with CmdlineParser.global_instance([f'{__name__}.SerializableParameterWithDefault']) as cp:\n            assert cp.get_task_obj().config == Config(foo=1, bar='bar')\n\n    def test_parse_param(self):\n        with CmdlineParser.global_instance([f'{__name__}.SerializableParameterWithOutDefault', '--config', '{\"foo\": 100, \"bar\": \"val\"}']) as cp:\n            assert cp.get_task_obj().config == Config(foo=100, bar='val')\n\n    def test_missing_parameter(self):\n        with pytest.raises(luigi.parameter.MissingParameterException):\n            with CmdlineParser.global_instance([f'{__name__}.SerializableParameterWithOutDefault']) as cp:\n                cp.get_task_obj()\n\n    def test_value_error(self):\n        with pytest.raises(ValueError):\n            with 
CmdlineParser.global_instance([f'{__name__}.SerializableParameterWithOutDefault', '--config', 'Foo']) as cp:\n                cp.get_task_obj()\n\n    def test_expected_one_argument_error(self):\n        with pytest.raises(SystemExit):\n            with CmdlineParser.global_instance([f'{__name__}.SerializableParameterWithOutDefault', '--config']) as cp:\n                cp.get_task_obj()\n\n    def test_mypy(self):\n        \"\"\"check invalid object cannot used for SerializableParameter\"\"\"\n\n        test_code = \"\"\"\nimport gokart\n\nclass InvalidClass:\n    ...\n\ngokart.SerializableParameter(object_type=InvalidClass)\n        \"\"\"\n        with tempfile.NamedTemporaryFile(suffix='.py') as test_file:\n            test_file.write(test_code.encode('utf-8'))\n            test_file.flush()\n            result = api.run(['--no-incremental', '--cache-dir=/dev/null', '--config-file', str(PYPROJECT_TOML), test_file.name])\n            assert 'Value of type variable \"S\" of \"SerializableParameter\" cannot be \"InvalidClass\"  [type-var]' in result[0]\n"
  },
  {
    "path": "test/test_target.py",
    "content": "import io\nimport os\nimport shutil\nimport unittest\nfrom datetime import datetime\nfrom unittest.mock import patch\n\nimport boto3\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot\nfrom moto import mock_aws\n\nfrom gokart.file_processor.base import _ChunkedLargeFileReader\nfrom gokart.target import make_model_target, make_target\nfrom test.util import _get_temporary_directory\n\n\nclass LocalTargetTest(unittest.TestCase):\n    def setUp(self):\n        self.temporary_directory = _get_temporary_directory()\n\n    def tearDown(self):\n        shutil.rmtree(self.temporary_directory, ignore_errors=True)\n\n    def test_save_and_load_pickle_file(self):\n        obj = 1\n        file_path = os.path.join(self.temporary_directory, 'test.pkl')\n\n        target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        with unittest.mock.patch('gokart.file_processor.base._ChunkedLargeFileReader', wraps=_ChunkedLargeFileReader) as monkey:\n            loaded = target.load()\n            monkey.assert_called()\n\n        self.assertEqual(loaded, obj)\n\n    def test_save_and_load_text_file(self):\n        obj = 1\n        file_path = os.path.join(self.temporary_directory, 'test.txt')\n\n        target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        loaded = target.load()\n\n        self.assertEqual(loaded, [str(obj)], msg='should save an object as List[str].')\n\n    def test_save_and_load_gzip(self):\n        obj = 1\n        file_path = os.path.join(self.temporary_directory, 'test.gz')\n\n        target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        loaded = target.load()\n\n        self.assertEqual(loaded, [str(obj)], msg='should save an object as List[str].')\n\n    def test_save_and_load_npz(self):\n        obj = np.ones(shape=10, dtype=np.float32)\n        file_path = os.path.join(self.temporary_directory, 'test.npz')\n        
target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        loaded = target.load()\n\n        np.testing.assert_almost_equal(obj, loaded)\n\n    def test_save_and_load_figure(self):\n        figure_binary = io.BytesIO()\n        pd.DataFrame(dict(x=range(10), y=range(10))).plot.scatter(x='x', y='y')\n        pyplot.savefig(figure_binary)\n        figure_binary.seek(0)\n        file_path = os.path.join(self.temporary_directory, 'test.png')\n        target = make_target(file_path=file_path, unique_id=None)\n        target.dump(figure_binary.read())\n\n        loaded = target.load()\n        self.assertGreater(len(loaded), 1000)  # any binary\n\n    def test_save_and_load_csv(self):\n        obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))\n        file_path = os.path.join(self.temporary_directory, 'test.csv')\n\n        target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        loaded = target.load()\n\n        pd.testing.assert_frame_equal(loaded, obj)\n\n    def test_save_and_load_tsv(self):\n        obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))\n        file_path = os.path.join(self.temporary_directory, 'test.tsv')\n\n        target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        loaded = target.load()\n\n        pd.testing.assert_frame_equal(loaded, obj)\n\n    def test_save_and_load_parquet(self):\n        obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))\n        file_path = os.path.join(self.temporary_directory, 'test.parquet')\n\n        target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        loaded = target.load()\n\n        pd.testing.assert_frame_equal(loaded, obj)\n\n    def test_save_and_load_feather(self):\n        obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]), index=pd.Index([33, 44], name='object_index'))\n        file_path = os.path.join(self.temporary_directory, 'test.feather')\n\n        target = 
make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        loaded = target.load()\n\n        pd.testing.assert_frame_equal(loaded, obj)\n\n    def test_save_and_load_feather_without_store_index_in_feather(self):\n        obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]), index=pd.Index([33, 44], name='object_index')).reset_index()\n        file_path = os.path.join(self.temporary_directory, 'test.feather')\n\n        target = make_target(file_path=file_path, unique_id=None, store_index_in_feather=False)\n        target.dump(obj)\n        loaded = target.load()\n\n        pd.testing.assert_frame_equal(loaded, obj)\n\n    def test_last_modified_time(self):\n        obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))\n        file_path = os.path.join(self.temporary_directory, 'test.csv')\n\n        target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        t = target.last_modification_time()\n        self.assertIsInstance(t, datetime)\n\n    def test_last_modified_time_without_file(self):\n        file_path = os.path.join(self.temporary_directory, 'test.csv')\n        target = make_target(file_path=file_path, unique_id=None)\n        with self.assertRaises(FileNotFoundError):\n            target.last_modification_time()\n\n    def test_save_pandas_series(self):\n        obj = pd.Series(data=[1, 2], name='column_name')\n        file_path = os.path.join(self.temporary_directory, 'test.csv')\n\n        target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        loaded = target.load()\n\n        pd.testing.assert_series_equal(loaded['column_name'], obj)\n\n    def test_dump_with_lock(self):\n        with patch('gokart.target.wrap_dump_with_lock') as wrap_with_lock_mock:\n            obj = 1\n            file_path = os.path.join(self.temporary_directory, 'test.pkl')\n            target = make_target(file_path=file_path, unique_id=None)\n            target.dump(obj, lock_at_dump=True)\n\n          
  wrap_with_lock_mock.assert_called_once()\n\n    def test_dump_without_lock(self):\n        with patch('gokart.target.wrap_dump_with_lock') as wrap_with_lock_mock:\n            obj = 1\n            file_path = os.path.join(self.temporary_directory, 'test.pkl')\n            target = make_target(file_path=file_path, unique_id=None)\n            target.dump(obj, lock_at_dump=False)\n\n            wrap_with_lock_mock.assert_not_called()\n\n\nclass S3TargetTest(unittest.TestCase):\n    @mock_aws\n    def test_save_on_s3(self):\n        conn = boto3.resource('s3', region_name='us-east-1')\n        conn.create_bucket(Bucket='test')\n\n        obj = 1\n        file_path = os.path.join('s3://test/', 'test.pkl')\n\n        target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        loaded = target.load()\n\n        self.assertEqual(loaded, obj)\n\n    @mock_aws\n    def test_last_modified_time(self):\n        conn = boto3.resource('s3', region_name='us-east-1')\n        conn.create_bucket(Bucket='test')\n\n        obj = 1\n        file_path = os.path.join('s3://test/', 'test.pkl')\n\n        target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        t = target.last_modification_time()\n        self.assertIsInstance(t, datetime)\n\n    @mock_aws\n    def test_last_modified_time_without_file(self):\n        conn = boto3.resource('s3', region_name='us-east-1')\n        conn.create_bucket(Bucket='test')\n\n        file_path = os.path.join('s3://test/', 'test.pkl')\n        target = make_target(file_path=file_path, unique_id=None)\n        with self.assertRaises(FileNotFoundError):\n            target.last_modification_time()\n\n    @mock_aws\n    def test_save_on_s3_feather(self):\n        conn = boto3.resource('s3', region_name='us-east-1')\n        conn.create_bucket(Bucket='test')\n\n        obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))\n        file_path = os.path.join('s3://test/', 'test.feather')\n\n        
target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        loaded = target.load()\n\n        pd.testing.assert_frame_equal(loaded, obj)\n\n    @mock_aws\n    def test_save_on_s3_parquet(self):\n        conn = boto3.resource('s3', region_name='us-east-1')\n        conn.create_bucket(Bucket='test')\n\n        obj = pd.DataFrame(dict(a=[1, 2], b=[3, 4]))\n        file_path = os.path.join('s3://test/', 'test.parquet')\n\n        target = make_target(file_path=file_path, unique_id=None)\n        target.dump(obj)\n        loaded = target.load()\n\n        pd.testing.assert_frame_equal(loaded, obj)\n\n\nclass ModelTargetTest(unittest.TestCase):\n    def setUp(self):\n        self.temporary_directory = _get_temporary_directory()\n\n    def tearDown(self):\n        shutil.rmtree(self.temporary_directory, ignore_errors=True)\n\n    @staticmethod\n    def _save_function(obj, path):\n        make_target(file_path=path).dump(obj)\n\n    @staticmethod\n    def _load_function(path):\n        return make_target(file_path=path).load()\n\n    def test_model_target_on_local(self):\n        obj = 1\n        file_path = os.path.join(self.temporary_directory, 'test.zip')\n\n        target = make_model_target(\n            file_path=file_path, temporary_directory=self.temporary_directory, save_function=self._save_function, load_function=self._load_function\n        )\n\n        target.dump(obj)\n        loaded = target.load()\n\n        self.assertEqual(loaded, obj)\n\n    @mock_aws\n    def test_model_target_on_s3(self):\n        conn = boto3.resource('s3', region_name='us-east-1')\n        conn.create_bucket(Bucket='test')\n\n        obj = 1\n        file_path = os.path.join('s3://test/', 'test.zip')\n\n        target = make_model_target(\n            file_path=file_path, temporary_directory=self.temporary_directory, save_function=self._save_function, load_function=self._load_function\n        )\n\n        target.dump(obj)\n        loaded = 
target.load()\n\n        self.assertEqual(loaded, obj)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/test_task_instance_parameter.py",
    "content": "import unittest\nfrom typing import Any\n\nimport luigi\n\nimport gokart\nfrom gokart import ListTaskInstanceParameter, TaskInstanceParameter, TaskOnKart\n\n\nclass _DummySubTask(TaskOnKart[Any]):\n    task_namespace = __name__\n    pass\n\n\nclass _DummyCorrectSubClassTask(_DummySubTask):\n    task_namespace = __name__\n    pass\n\n\nclass _DummyInvalidSubClassTask(TaskOnKart[Any]):\n    task_namespace = __name__\n    pass\n\n\nclass _DummyTask(TaskOnKart[Any]):\n    task_namespace = __name__\n    param: luigi.IntParameter = luigi.IntParameter()\n    task: TaskInstanceParameter[_DummySubTask] = TaskInstanceParameter(default=_DummySubTask())\n\n\nclass _DummyListTask(TaskOnKart[Any]):\n    task_namespace = __name__\n    param: luigi.IntParameter = luigi.IntParameter()\n    task: ListTaskInstanceParameter[_DummySubTask] = ListTaskInstanceParameter(default=[_DummySubTask(), _DummySubTask()])\n\n\nclass TaskInstanceParameterTest(unittest.TestCase):\n    def setUp(self):\n        _DummyTask.clear_instance_cache()\n\n    def test_serialize_and_parse(self):\n        original = _DummyTask(param=2)\n        s = gokart.TaskInstanceParameter().serialize(original)\n        parsed = gokart.TaskInstanceParameter().parse(s)\n        self.assertEqual(parsed.task_id, original.task_id)\n\n    def test_serialize_and_parse_list_params(self):\n        original = _DummyListTask(param=2)\n        s = gokart.TaskInstanceParameter().serialize(original)\n        parsed = gokart.TaskInstanceParameter().parse(s)\n        self.assertEqual(parsed.task_id, original.task_id)\n\n    def test_invalid_class(self):\n        self.assertRaises(TypeError, lambda: gokart.TaskInstanceParameter(expected_type=1))  # type: ignore\n\n    def test_params_with_correct_param_type(self):\n        class _DummyPipelineA(TaskOnKart[Any]):\n            task_namespace = __name__\n            subtask: gokart.TaskInstanceParameter[_DummySubTask] = 
gokart.TaskInstanceParameter(expected_type=_DummySubTask)\n\n        task = _DummyPipelineA(subtask=_DummyCorrectSubClassTask())\n        self.assertEqual(task.requires()['subtask'], _DummyCorrectSubClassTask())  # type: ignore\n\n    def test_params_with_invalid_param_type(self):\n        class _DummyPipelineB(TaskOnKart[Any]):\n            task_namespace = __name__\n            subtask: gokart.TaskInstanceParameter[_DummySubTask] = gokart.TaskInstanceParameter(expected_type=_DummySubTask)\n\n        with self.assertRaises(TypeError):\n            _DummyPipelineB(subtask=_DummyInvalidSubClassTask())  # type: ignore\n\n\nclass ListTaskInstanceParameterTest(unittest.TestCase):\n    def setUp(self):\n        _DummyTask.clear_instance_cache()\n\n    def test_invalid_class(self):\n        self.assertRaises(TypeError, lambda: gokart.ListTaskInstanceParameter(expected_elements_type=1))  # type: ignore  # not type instance\n\n    def test_list_params_with_correct_param_types(self):\n        class _DummyPipelineC(TaskOnKart[Any]):\n            task_namespace = __name__\n            subtask: gokart.ListTaskInstanceParameter[_DummySubTask] = gokart.ListTaskInstanceParameter(expected_elements_type=_DummySubTask)\n\n        task = _DummyPipelineC(subtask=[_DummyCorrectSubClassTask()])\n        self.assertEqual(task.requires()['subtask'], (_DummyCorrectSubClassTask(),))  # type: ignore\n\n    def test_list_params_with_invalid_param_types(self):\n        class _DummyPipelineD(TaskOnKart[Any]):\n            task_namespace = __name__\n            subtask: gokart.ListTaskInstanceParameter[_DummySubTask] = gokart.ListTaskInstanceParameter(expected_elements_type=_DummySubTask)\n\n        with self.assertRaises(TypeError):\n            _DummyPipelineD(subtask=[_DummyInvalidSubClassTask(), _DummyCorrectSubClassTask()])  # type: ignore\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/test_task_on_kart.py",
    "content": "from __future__ import annotations\n\nimport os\nimport pathlib\nimport unittest\nfrom datetime import datetime\nfrom typing import Any, cast\nfrom unittest.mock import Mock, patch\n\nimport luigi\nimport pandas as pd\nfrom luigi.parameter import ParameterVisibility\nfrom luigi.util import inherits\n\nimport gokart\nfrom gokart.file_processor import XmlFileProcessor\nfrom gokart.parameter import ListTaskInstanceParameter, TaskInstanceParameter\nfrom gokart.target import ModelTarget, SingleFileTarget, TargetOnKart\nfrom gokart.task import EmptyDumpError\n\n\nclass _DummyTask(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n    param: luigi.IntParameter = luigi.IntParameter(default=1)\n    list_param: luigi.ListParameter[tuple[str, ...]] = luigi.ListParameter(default=('a', 'b'))\n    bool_param: luigi.BoolParameter = luigi.BoolParameter()\n\n    def output(self):\n        return None\n\n\nclass _DummyTaskA(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n\n    def output(self):\n        return None\n\n\n@inherits(_DummyTaskA)\nclass _DummyTaskB(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n\n    def output(self):\n        return None\n\n    def requires(self):\n        return self.clone(_DummyTaskA)\n\n\n@inherits(_DummyTaskB)\nclass _DummyTaskC(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n\n    def output(self):\n        return None\n\n    def requires(self):\n        return self.clone(_DummyTaskB)\n\n\nclass _DummyTaskD(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n\n\nclass _DummyTaskWithoutLock(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n\n    def run(self):\n        pass\n\n\nclass _DummySubTaskWithPrivateParameter(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n\n\nclass _DummyTaskWithPrivateParameter(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n    int_param: luigi.IntParameter = luigi.IntParameter()\n    private_int_param: luigi.IntParameter = 
luigi.IntParameter(visibility=ParameterVisibility.PRIVATE)\n    task_param: TaskInstanceParameter[Any] = TaskInstanceParameter()\n    list_task_param: ListTaskInstanceParameter[Any] = ListTaskInstanceParameter()\n\n\nclass TaskTest(unittest.TestCase):\n    def setUp(self):\n        _DummyTask.clear_instance_cache()\n        _DummyTaskA.clear_instance_cache()\n        _DummyTaskB.clear_instance_cache()\n        _DummyTaskC.clear_instance_cache()\n\n    def test_complete_without_dependency(self):\n        task = _DummyTask()\n        self.assertTrue(task.complete(), msg='_DummyTask does not have any output files, so this always must be completed.')\n\n    def test_complete_with_rerun_flag(self):\n        task = _DummyTask(rerun=True)\n        self.assertFalse(task.complete(), msg='\"rerun\" flag force tasks rerun once.')\n        self.assertTrue(task.complete(), msg='\"rerun\" flag should be changed.')\n\n    def test_complete_with_uncompleted_input(self):\n        uncompleted_target = Mock(spec=TargetOnKart)\n        uncompleted_target.exists.return_value = False\n\n        # depends on an uncompleted target.\n        task = _DummyTask()\n        task.input = Mock(return_value=uncompleted_target)  # type: ignore\n        self.assertTrue(task.complete(), msg='task does not care input targets.')\n\n        # make a task check its inputs.\n        task.strict_check = True\n        self.assertFalse(task.complete())\n\n    def test_complete_with_modified_input(self):\n        input_target = Mock(spec=TargetOnKart)\n        input_target.exists.return_value = True\n        input_target.last_modification_time.return_value = datetime(2018, 1, 1, 10, 0, 0)\n        output_target = Mock(spec=TargetOnKart)\n        output_target.exists.return_value = True\n        output_target.last_modification_time.return_value = datetime(2018, 1, 1, 9, 0, 0)\n\n        # depends on an uncompleted target.\n        task = _DummyTask()\n        task.modification_time_check = False\n        
task.input = Mock(return_value=input_target)  # type: ignore\n        task.output = Mock(return_value=output_target)  # type: ignore\n        self.assertTrue(task.complete(), msg='task does not care modified time')\n\n        # make a task check its inputs.\n        task.modification_time_check = True\n        self.assertFalse(task.complete())\n\n    def test_complete_when_modification_time_equals_output(self):\n        \"\"\"Test the case that modification time of input equals that of output.\n        The case is occurred when input and output targets are same.\n        \"\"\"\n        input_target = Mock(spec=TargetOnKart)\n        input_target.exists.return_value = True\n        input_target.last_modification_time.return_value = datetime(2018, 1, 1, 10, 0, 0)\n        output_target = Mock(spec=TargetOnKart)\n        output_target.exists.return_value = True\n        output_target.last_modification_time.return_value = datetime(2018, 1, 1, 10, 0, 0)\n\n        task = _DummyTask()\n        task.modification_time_check = True\n        task.input = Mock(return_value=input_target)  # type: ignore\n        task.output = Mock(return_value=output_target)  # type: ignore\n        self.assertTrue(task.complete())\n\n    def test_complete_when_input_and_output_equal(self):\n        target1 = Mock(spec=TargetOnKart)\n        target1.exists.return_value = True\n        target1.path.return_value = 'path1.pkl'\n        target1.last_modification_time.return_value = datetime(2018, 1, 1, 10, 0, 0)\n\n        target2 = Mock(spec=TargetOnKart)\n        target2.exists.return_value = True\n        target2.path.return_value = 'path2.pkl'\n        target2.last_modification_time.return_value = datetime(2018, 1, 1, 9, 0, 0)\n\n        target3 = Mock(spec=TargetOnKart)\n        target3.exists.return_value = True\n        target3.path.return_value = 'path3.pkl'\n        target3.last_modification_time.return_value = datetime(2018, 1, 1, 9, 0, 0)\n\n        task = _DummyTask()\n        
task.modification_time_check = True\n        task.input = Mock(return_value=[target1, target2])  # type: ignore\n        task.output = Mock(return_value=[target1, target2])  # type: ignore\n        self.assertTrue(task.complete())\n\n        task.input = Mock(return_value=[target1, target2])  # type: ignore\n        task.output = Mock(return_value=[target2, target3])  # type: ignore\n        self.assertFalse(task.complete())\n\n    def test_default_target(self):\n        task = _DummyTaskD()\n        default_target = task.output()\n        self.assertIsInstance(default_target, SingleFileTarget)\n        self.assertEqual(f'_DummyTaskD_{task.task_unique_id}.pkl', pathlib.Path(default_target._target.path).name)  # type: ignore\n\n    def test_clone_with_special_params(self):\n        class _DummyTaskRerun(gokart.TaskOnKart[Any]):\n            a: luigi.BoolParameter = luigi.BoolParameter(default=False)\n\n        task = _DummyTaskRerun(a=True, rerun=True)\n        cloned = task.clone(_DummyTaskRerun)\n        cloned_with_explicit_rerun = task.clone(_DummyTaskRerun, rerun=True)\n        self.assertTrue(cloned.a)\n        self.assertFalse(cloned.rerun)  # do not clone rerun\n        self.assertTrue(cloned_with_explicit_rerun.a)\n        self.assertTrue(cloned_with_explicit_rerun.rerun)\n\n    def test_default_large_dataframe_target(self):\n        task = _DummyTaskD()\n        default_large_dataframe_target = task.make_large_data_frame_target()\n        self.assertIsInstance(default_large_dataframe_target, ModelTarget)\n        target = cast(ModelTarget, default_large_dataframe_target)\n        self.assertEqual(f'_DummyTaskD_{task.task_unique_id}.zip', pathlib.Path(target._zip_client.path).name)\n\n    def test_make_target(self):\n        task = _DummyTask()\n        target = task.make_target('test.txt')\n        self.assertIsInstance(target, SingleFileTarget)\n\n    def test_make_target_without_id(self):\n        path = _DummyTask().make_target('test.txt', 
use_unique_id=False).path()\n        self.assertEqual(path, os.path.join(_DummyTask().workspace_directory, 'test.txt'))\n\n    def test_make_target_with_processor(self):\n        task = _DummyTask()\n        processor = XmlFileProcessor()\n        target = task.make_target('test.dummy', processor=processor)\n        self.assertIsInstance(target, SingleFileTarget)\n        target = cast(SingleFileTarget, target)\n        self.assertEqual(target._processor, processor)\n\n    def test_get_own_code(self):\n        task = _DummyTask()\n        task_scripts = 'def output(self):\\nreturn None\\n'\n        self.assertEqual(task.get_own_code().replace(' ', ''), task_scripts.replace(' ', ''))\n\n    def test_make_unique_id_with_own_code(self):\n        class _MyDummyTaskA(gokart.TaskOnKart[str]):\n            _visible_in_registry = False\n\n            def run(self):\n                self.dump('Hello, world!')\n\n        task_unique_id = _MyDummyTaskA(serialized_task_definition_check=False).make_unique_id()\n        task_with_code_unique_id = _MyDummyTaskA(serialized_task_definition_check=True).make_unique_id()\n        self.assertNotEqual(task_unique_id, task_with_code_unique_id)\n\n        class _MyDummyTaskA(gokart.TaskOnKart[str]):  # type: ignore\n            _visible_in_registry = False\n\n            def run(self):\n                modified_code = 'modify!!'\n                self.dump(modified_code)\n\n        task_modified_unique_id = _MyDummyTaskA(serialized_task_definition_check=False).make_unique_id()\n        task_modified_with_code_unique_id = _MyDummyTaskA(serialized_task_definition_check=True).make_unique_id()\n        self.assertEqual(task_modified_unique_id, task_unique_id)\n        self.assertNotEqual(task_modified_with_code_unique_id, task_with_code_unique_id)\n\n    def test_compare_targets_of_different_tasks(self):\n        path1 = _DummyTask(param=1).make_target('test.txt').path()\n        path2 = _DummyTask(param=2).make_target('test.txt').path()\n     
   self.assertNotEqual(path1, path2, msg='different tasks must generate different targets.')\n\n    def test_make_model_target(self):\n        task = _DummyTask()\n        target = task.make_model_target('test.zip', save_function=Mock(), load_function=Mock())\n        self.assertIsInstance(target, ModelTarget)\n\n    def test_load_with_single_target(self):\n        task = _DummyTask()\n        target = Mock(spec=TargetOnKart)\n        target.load.return_value = 1\n        task.input = Mock(return_value=target)  # type: ignore\n\n        data = task.load()\n        target.load.assert_called_once()\n        self.assertEqual(data, 1)\n\n    def test_load_with_single_dict_target(self):\n        task = _DummyTask()\n        target = Mock(spec=TargetOnKart)\n        target.load.return_value = 1\n        task.input = Mock(return_value={'target_key': target})  # type: ignore\n\n        data = task.load()\n        target.load.assert_called_once()\n        self.assertEqual(data, {'target_key': 1})\n\n    def test_load_with_keyword(self):\n        task = _DummyTask()\n        target = Mock(spec=TargetOnKart)\n        target.load.return_value = 1\n        task.input = Mock(return_value={'target_key': target})  # type: ignore\n\n        data = task.load('target_key')\n        target.load.assert_called_once()\n        self.assertEqual(data, 1)\n\n    def test_load_tuple(self):\n        task = _DummyTask()\n        target1 = Mock(spec=TargetOnKart)\n        target1.load.return_value = 1\n        target2 = Mock(spec=TargetOnKart)\n        target2.load.return_value = 2\n        task.input = Mock(return_value=(target1, target2))  # type: ignore\n\n        data = task.load()\n        target1.load.assert_called_once()\n        target2.load.assert_called_once()\n        self.assertEqual(data[0], 1)\n        self.assertEqual(data[1], 2)\n\n    def test_load_dictionary_at_once(self):\n        task = _DummyTask()\n        target1 = Mock(spec=TargetOnKart)\n        
target1.load.return_value = 1\n        target2 = Mock(spec=TargetOnKart)\n        target2.load.return_value = 2\n        task.input = Mock(return_value={'target_key_1': target1, 'target_key_2': target2})  # type: ignore\n\n        data = task.load()\n        target1.load.assert_called_once()\n        target2.load.assert_called_once()\n        self.assertEqual(data['target_key_1'], 1)\n        self.assertEqual(data['target_key_2'], 2)\n\n    def test_load_with_task_on_kart(self):\n        task = _DummyTask()\n\n        task2 = Mock(spec=gokart.TaskOnKart)\n        task2.make_unique_id.return_value = 'task2'\n        task2_output = Mock(spec=TargetOnKart)\n        task2.output.return_value = task2_output\n        task2_output.load.return_value = 1\n\n        # task2 should be in requires' return values\n        task.requires = lambda: {'task2': task2}  # type: ignore\n\n        actual = task.load(task2)\n        self.assertEqual(actual, 1)\n\n    def test_load_with_task_on_kart_should_fail_when_task_on_kart_is_not_in_requires(self):\n        \"\"\"\n        if load args is not in requires, it should raise an error.\n        \"\"\"\n        task = _DummyTask()\n\n        task2 = Mock(spec=gokart.TaskOnKart)\n        task2_output = Mock(spec=TargetOnKart)\n        task2.output.return_value = task2_output\n        task2_output.load.return_value = 1\n\n        with self.assertRaises(AssertionError):\n            task.load(task2)\n\n    def test_load_with_task_on_kart_list(self):\n        task = _DummyTask()\n\n        task2 = Mock(spec=gokart.TaskOnKart)\n        task2.make_unique_id.return_value = 'task2'\n        task2_output = Mock(spec=TargetOnKart)\n        task2.output.return_value = task2_output\n        task2_output.load.return_value = 1\n\n        task3 = Mock(spec=gokart.TaskOnKart)\n        task3.make_unique_id.return_value = 'task3'\n        task3_output = Mock(spec=TargetOnKart)\n        task3.output.return_value = task3_output\n        
task3_output.load.return_value = 2\n\n        # task2 should be in requires' return values\n        task.requires = lambda: {'tasks': [task2, task3]}  # type: ignore\n\n        load_args: list[gokart.TaskOnKart[int]] = [task2, task3]\n        actual = task.load(load_args)\n        self.assertEqual(actual, [1, 2])\n\n    def test_load_generator_with_single_target(self):\n        task = _DummyTask()\n        target = Mock(spec=TargetOnKart)\n        target.load.return_value = [1, 2]\n        task.input = Mock(return_value=target)  # type: ignore\n        data = [x for x in task.load_generator()]\n        self.assertEqual(data, [[1, 2]])\n\n    def test_load_generator_with_keyword(self):\n        task = _DummyTask()\n        target = Mock(spec=TargetOnKart)\n        target.load.return_value = [1, 2]\n        task.input = Mock(return_value={'target_key': target})  # type: ignore\n        data = [x for x in task.load_generator('target_key')]\n        self.assertEqual(data, [[1, 2]])\n\n    def test_load_generator_with_list_task_on_kart(self):\n        task = _DummyTask()\n\n        task2 = Mock(spec=gokart.TaskOnKart)\n        task2.make_unique_id.return_value = 'task2'\n        task2_output = Mock(spec=TargetOnKart)\n        task2.output.return_value = task2_output\n        task2_output.load.return_value = 1\n\n        task3 = Mock(spec=gokart.TaskOnKart)\n        task3.make_unique_id.return_value = 'task3'\n        task3_output = Mock(spec=TargetOnKart)\n        task3.output.return_value = task3_output\n        task3_output.load.return_value = 2\n\n        # task2 should be in requires' return values\n        task.requires = lambda: {'tasks': [task2, task3]}  # type: ignore\n\n        load_args: list[gokart.TaskOnKart[int]] = [task2, task3]\n        actual = [x for x in task.load_generator(load_args)]\n        self.assertEqual(actual, [1, 2])\n\n    def test_dump(self):\n        task = _DummyTask()\n        target = Mock(spec=TargetOnKart)\n        task.output = 
Mock(return_value=target)  # type: ignore\n\n        task.dump(1)\n        target.dump.assert_called_once()\n\n    def test_fail_on_empty_dump(self):\n        # do not fail\n        task = _DummyTask(fail_on_empty_dump=False)\n        target = Mock(spec=TargetOnKart)\n        task.output = Mock(return_value=target)  # type: ignore\n        task.dump(pd.DataFrame())\n        target.dump.assert_called_once()\n\n        # fail\n        task = _DummyTask(fail_on_empty_dump=True)\n        self.assertRaises(EmptyDumpError, lambda: task.dump(pd.DataFrame()))\n\n    @patch('luigi.configuration.get_config')\n    def test_add_configuration(self, mock_config: Mock) -> None:\n        mock_config.return_value = {'_DummyTask': {'list_param': '[\"c\", \"d\"]', 'param': '3', 'bool_param': 'True'}}\n        kwargs: dict[str, Any] = dict()\n        _DummyTask._add_configuration(kwargs, '_DummyTask')\n        self.assertEqual(3, kwargs['param'])\n        self.assertEqual(['c', 'd'], list(kwargs['list_param']))\n        self.assertEqual(True, kwargs['bool_param'])\n\n    @patch('luigi.cmdline_parser.CmdlineParser.get_instance')\n    def test_add_cofigureation_evaluation_order(self, mock_cmdline: Mock) -> None:\n        \"\"\"\n        in case TaskOnKart._add_configuration will break evaluation order\n        @see https://luigi.readthedocs.io/en/stable/parameters.html#parameter-resolution-order\n        \"\"\"\n\n        class DummyTaskAddConfiguration(gokart.TaskOnKart[Any]):\n            aa = luigi.IntParameter()\n\n        luigi.configuration.get_config().set('DummyTaskAddConfiguration', 'aa', '3')\n        mock_cmdline.return_value = luigi.cmdline_parser.CmdlineParser(['DummyTaskAddConfiguration'])\n        self.assertEqual(DummyTaskAddConfiguration().aa, 3)\n\n        mock_cmdline.return_value = luigi.cmdline_parser.CmdlineParser(['DummyTaskAddConfiguration', '--DummyTaskAddConfiguration-aa', '2'])\n        self.assertEqual(DummyTaskAddConfiguration().aa, 2)\n\n    def 
test_use_rerun_with_inherits(self):\n        # All tasks are completed.\n        task_c = _DummyTaskC()\n        self.assertTrue(task_c.complete())\n        self.assertTrue(task_c.requires().complete())  # This is an instance of TaskB.\n        self.assertTrue(task_c.requires().requires().complete())  # This is an instance of TaskA.\n\n        luigi.configuration.get_config().set(f'{__name__}._DummyTaskB', 'rerun', 'True')\n        task_c = _DummyTaskC()\n        self.assertTrue(task_c.complete())\n        self.assertFalse(task_c.requires().complete())  # This is an instance of _DummyTaskB.\n        self.assertTrue(task_c.requires().requires().complete())  # This is an instance of _DummyTaskA.\n\n        # All tasks are not completed, because _DummyTaskC.rerun = True.\n        task_c = _DummyTaskC(rerun=True)\n        self.assertFalse(task_c.complete())\n        self.assertTrue(task_c.requires().complete())  # This is an instance of _DummyTaskB.\n        self.assertTrue(task_c.requires().requires().complete())  # This is an instance of _DummyTaskA.\n\n    def test_significant_flag(self) -> None:\n        def _make_task(significant: bool, has_required_task: bool) -> gokart.TaskOnKart[Any]:\n            class _MyDummyTaskA(gokart.TaskOnKart[Any]):\n                task_namespace = f'{__name__}_{significant}_{has_required_task}'\n\n            class _MyDummyTaskB(gokart.TaskOnKart[Any]):\n                task_namespace = f'{__name__}_{significant}_{has_required_task}'\n\n                def requires(self):\n                    if has_required_task:\n                        return _MyDummyTaskA(significant=significant)\n                    return\n\n            return _MyDummyTaskB()\n\n        x_task = _make_task(significant=True, has_required_task=True)\n        y_task = _make_task(significant=False, has_required_task=True)\n        z_task = _make_task(significant=False, has_required_task=False)\n\n        self.assertNotEqual(x_task.make_unique_id(), 
y_task.make_unique_id())\n        self.assertEqual(y_task.make_unique_id(), z_task.make_unique_id())\n\n    def test_default_requires(self):\n        class _WithoutTaskInstanceParameter(gokart.TaskOnKart[Any]):\n            task_namespace = __name__\n\n        class _WithTaskInstanceParameter(gokart.TaskOnKart[Any]):\n            task_namespace = __name__\n            a_task: gokart.TaskInstanceParameter[Any] = gokart.TaskInstanceParameter()\n\n        without_task = _WithoutTaskInstanceParameter()\n        self.assertListEqual(without_task.requires(), [])  # type: ignore\n\n        with_task = _WithTaskInstanceParameter(a_task=without_task)\n        self.assertEqual(with_task.requires()['a_task'], without_task)  # type: ignore\n\n    def test_repr(self):\n        task = _DummyTaskWithPrivateParameter(\n            int_param=1,\n            private_int_param=1,\n            task_param=_DummySubTaskWithPrivateParameter(),\n            list_task_param=[_DummySubTaskWithPrivateParameter(), _DummySubTaskWithPrivateParameter()],\n        )\n        task_id = task.make_unique_id()\n        sub_task_id = _DummySubTaskWithPrivateParameter().make_unique_id()\n        expected = (\n            f'{__name__}._DummyTaskWithPrivateParameter[{task_id}](int_param=1, private_int_param=1, task_param={__name__}._DummySubTaskWithPrivateParameter({sub_task_id}), '\n            f'list_task_param=[{__name__}._DummySubTaskWithPrivateParameter({sub_task_id}), {__name__}._DummySubTaskWithPrivateParameter({sub_task_id})])'\n        )  # noqa:E501\n        self.assertEqual(expected, repr(task))\n\n    def test_str(self):\n        task = _DummyTaskWithPrivateParameter(\n            int_param=1,\n            private_int_param=1,\n            task_param=_DummySubTaskWithPrivateParameter(),\n            list_task_param=[_DummySubTaskWithPrivateParameter(), _DummySubTaskWithPrivateParameter()],\n        )\n        task_id = task.make_unique_id()\n        sub_task_id = 
_DummySubTaskWithPrivateParameter().make_unique_id()\n        expected = (\n            f'{__name__}._DummyTaskWithPrivateParameter[{task_id}](int_param=1, task_param={__name__}._DummySubTaskWithPrivateParameter({sub_task_id}), '\n            f'list_task_param=[{__name__}._DummySubTaskWithPrivateParameter({sub_task_id}), {__name__}._DummySubTaskWithPrivateParameter({sub_task_id})])'\n        )\n        self.assertEqual(expected, str(task))\n\n    def test_is_task_on_kart(self):\n        self.assertEqual(True, gokart.TaskOnKart.is_task_on_kart(gokart.TaskOnKart()))\n        self.assertEqual(False, gokart.TaskOnKart.is_task_on_kart(1))\n        self.assertEqual(False, gokart.TaskOnKart.is_task_on_kart(list()))\n        self.assertEqual(True, gokart.TaskOnKart.is_task_on_kart((gokart.TaskOnKart(), gokart.TaskOnKart())))\n\n    def test_serialize_and_deserialize_default_values(self):\n        task: gokart.TaskOnKart[Any] = gokart.TaskOnKart()\n        deserialized: gokart.TaskOnKart[Any] = luigi.task_register.load_task(None, task.get_task_family(), task.to_str_params())\n        self.assertDictEqual(task.to_str_params(), deserialized.to_str_params())\n\n    def test_to_str_params_changes_on_values_and_flags(self):\n        class _DummyTaskWithParams(gokart.TaskOnKart[Any]):\n            task_namespace = __name__\n            param: luigi.Parameter = luigi.Parameter()\n\n        t1 = _DummyTaskWithParams(param='a')\n        self.assertEqual(t1.to_str_params(), t1.to_str_params())  # cache\n        self.assertEqual(t1.to_str_params(), _DummyTaskWithParams(param='a').to_str_params())  # same value\n        self.assertNotEqual(t1.to_str_params(), _DummyTaskWithParams(param='b').to_str_params())  # different value\n        self.assertNotEqual(t1.to_str_params(), t1.to_str_params(only_significant=True))\n\n    def test_should_lock_run_when_set(self):\n        class _DummyTaskWithLock(gokart.TaskOnKart[str]):\n            def run(self):\n                self.dump('hello')\n\n 
       task = _DummyTaskWithLock(redis_host='host', redis_port=123, redis_timeout=180, should_lock_run=True)\n        self.assertEqual(task.run.__wrapped__.__name__, 'run')  # type: ignore\n\n    def test_should_fail_lock_run_when_host_unset(self):\n        with self.assertRaises(AssertionError):\n            gokart.TaskOnKart(redis_port=123, redis_timeout=180, should_lock_run=True)\n\n    def test_should_fail_lock_run_when_port_unset(self):\n        with self.assertRaises(AssertionError):\n            gokart.TaskOnKart(redis_host='host', redis_timeout=180, should_lock_run=True)\n\n\nclass _DummyTaskWithNonCompleted(gokart.TaskOnKart[Any]):\n    def dump(self, _obj: Any, _target: Any = None, _custom_labels: Any = None) -> None:\n        # overrive dump() to do nothing.\n        pass\n\n    def run(self):\n        self.dump('hello')\n\n    def complete(self):\n        return False\n\n\nclass _DummyTaskWithCompleted(gokart.TaskOnKart[Any]):\n    def dump(self, obj: Any, _target: Any = None, custom_labels: Any = None) -> None:\n        # overrive dump() to do nothing.\n        pass\n\n    def run(self):\n        self.dump('hello')\n\n    def complete(self):\n        return True\n\n\nclass TestCompleteCheckAtRun(unittest.TestCase):\n    def test_run_when_complete_check_at_run_is_false_and_task_is_not_completed(self):\n        task = _DummyTaskWithNonCompleted(complete_check_at_run=False)\n        task.dump = Mock()  # type: ignore\n        task.run()\n\n        # since run() is called, dump() should be called.\n        task.dump.assert_called_once()\n\n    def test_run_when_complete_check_at_run_is_false_and_task_is_completed(self):\n        task = _DummyTaskWithCompleted(complete_check_at_run=False)\n        task.dump = Mock()  # type: ignore\n        task.run()\n\n        # even task is completed, since run() is called, dump() should be called.\n        task.dump.assert_called_once()\n\n    def 
test_run_when_complete_check_at_run_is_true_and_task_is_not_completed(self):\n        task = _DummyTaskWithNonCompleted(complete_check_at_run=True)\n        task.dump = Mock()  # type: ignore\n        task.run()\n\n        # since task is not completed, when run() is called, dump() should be called.\n        task.dump.assert_called_once()\n\n    def test_run_when_complete_check_at_run_is_true_and_task_is_completed(self):\n        task = _DummyTaskWithCompleted(complete_check_at_run=True)\n        task.dump = Mock()  # type: ignore\n        task.run()\n\n        # since task is completed, even when run() is called, dump() should not be called.\n        task.dump.assert_not_called()\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/test_utils.py",
    "content": "import unittest\nfrom typing import TYPE_CHECKING\n\nimport pandas as pd\nimport pytest\n\nfrom gokart.task import TaskOnKart\nfrom gokart.utils import flatten, get_dataframe_type_from_task, map_flattenable_items\n\nif TYPE_CHECKING:\n    import polars as pl\n\ntry:\n    import polars as pl\n\n    HAS_POLARS = True\nexcept ImportError:\n    HAS_POLARS = False\n\n\nclass TestFlatten(unittest.TestCase):\n    def test_flatten_dict(self):\n        self.assertEqual(flatten({'a': 'foo', 'b': 'bar'}), ['foo', 'bar'])\n\n    def test_flatten_list(self):\n        self.assertEqual(flatten(['foo', ['bar', 'troll']]), ['foo', 'bar', 'troll'])\n\n    def test_flatten_str(self):\n        self.assertEqual(flatten('foo'), ['foo'])\n\n    def test_flatten_int(self):\n        self.assertEqual(flatten(42), [42])\n\n    def test_flatten_none(self):\n        self.assertEqual(flatten(None), [])\n\n\nclass TestMapFlatten(unittest.TestCase):\n    def test_map_flattenable_items(self):\n        self.assertEqual(map_flattenable_items(lambda x: str(x), {'a': 1, 'b': 2}), {'a': '1', 'b': '2'})\n        self.assertEqual(\n            map_flattenable_items(lambda x: str(x), (1, 2, 3, (4, 5, (6, 7, {'a': (8, 9, 0)})))),\n            ('1', '2', '3', ('4', '5', ('6', '7', {'a': ('8', '9', '0')}))),\n        )\n        self.assertEqual(\n            map_flattenable_items(\n                lambda x: str(x),\n                {'a': [1, 2, 3, '4'], 'b': {'c': True, 'd': {'e': 5}}},\n            ),\n            {'a': ['1', '2', '3', '4'], 'b': {'c': 'True', 'd': {'e': '5'}}},\n        )\n\n\nclass TestGetDataFrameTypeFromTask(unittest.TestCase):\n    \"\"\"Tests for get_dataframe_type_from_task function.\"\"\"\n\n    def test_pandas_dataframe_from_instance(self):\n        \"\"\"Test detecting pandas DataFrame from task instance.\"\"\"\n\n        class _PandasTaskInstance(TaskOnKart[pd.DataFrame]):\n            pass\n\n        task = _PandasTaskInstance()\n        
self.assertEqual(get_dataframe_type_from_task(task), 'pandas')\n\n    def test_pandas_dataframe_from_class(self):\n        \"\"\"Test detecting pandas DataFrame from task class.\"\"\"\n\n        class _PandasTaskClass(TaskOnKart[pd.DataFrame]):\n            pass\n\n        self.assertEqual(get_dataframe_type_from_task(_PandasTaskClass), 'pandas')\n\n    @pytest.mark.skipif(not HAS_POLARS, reason='polars not installed')\n    def test_polars_dataframe_from_instance(self):\n        \"\"\"Test detecting polars DataFrame from task instance.\"\"\"\n\n        class _PolarsTaskInstance(TaskOnKart[pl.DataFrame]):\n            pass\n\n        task = _PolarsTaskInstance()\n        self.assertEqual(get_dataframe_type_from_task(task), 'polars')\n\n    @pytest.mark.skipif(not HAS_POLARS, reason='polars not installed')\n    def test_polars_dataframe_from_class(self):\n        \"\"\"Test detecting polars DataFrame from task class.\"\"\"\n\n        class _PolarsTaskClass(TaskOnKart[pl.DataFrame]):\n            pass\n\n        self.assertEqual(get_dataframe_type_from_task(_PolarsTaskClass), 'polars')\n\n    def test_no_type_parameter_defaults_to_pandas(self):\n        \"\"\"Test that tasks without type parameter default to pandas.\"\"\"\n\n        # Create a class without __orig_bases__ by not using type parameters\n        class PlainTask:\n            pass\n\n        task = PlainTask()\n        self.assertEqual(get_dataframe_type_from_task(task), 'pandas')\n\n    def test_non_taskonkart_class_defaults_to_pandas(self):\n        \"\"\"Test that non-TaskOnKart classes default to pandas.\"\"\"\n\n        class RegularClass:\n            pass\n\n        task = RegularClass()\n        self.assertEqual(get_dataframe_type_from_task(task), 'pandas')\n\n    def test_taskonkart_with_non_dataframe_type(self):\n        \"\"\"Test TaskOnKart with non-DataFrame type parameter defaults to pandas.\"\"\"\n\n        class _StringTask(TaskOnKart[str]):\n            pass\n\n        task = 
_StringTask()\n        # Should default to pandas since str module is not 'pandas' or 'polars'\n        self.assertEqual(get_dataframe_type_from_task(task), 'pandas')\n\n    def test_nested_inheritance_pandas(self):\n        \"\"\"Test that nested inheritance without direct type parameter defaults to pandas.\"\"\"\n\n        class _BasePandasTask(TaskOnKart[pd.DataFrame]):\n            pass\n\n        class _DerivedPandasTask(_BasePandasTask):\n            pass\n\n        task = _DerivedPandasTask()\n        # _DerivedPandasTask doesn't have its own __orig_bases__ with type parameter,\n        # so it defaults to 'pandas'\n        self.assertEqual(get_dataframe_type_from_task(task), 'pandas')\n\n    @pytest.mark.skipif(not HAS_POLARS, reason='polars not installed')\n    def test_nested_inheritance_polars(self):\n        \"\"\"Test detecting polars DataFrame type through nested inheritance.\"\"\"\n\n        class _BasePolarsTask(TaskOnKart[pl.DataFrame]):\n            pass\n\n        class _DerivedPolarsTask(_BasePolarsTask):\n            pass\n\n        task = _DerivedPolarsTask()\n        # Function should detect 'polars' through the inheritance chain\n        self.assertEqual(get_dataframe_type_from_task(task), 'polars')\n\n    @pytest.mark.skipif(not HAS_POLARS, reason='polars not installed')\n    def test_polars_lazyframe_from_instance(self):\n        class _LazyTaskInstance(TaskOnKart[pl.LazyFrame]):\n            pass\n\n        task = _LazyTaskInstance()\n        self.assertEqual(get_dataframe_type_from_task(task), 'polars-lazy')\n\n    @pytest.mark.skipif(not HAS_POLARS, reason='polars not installed')\n    def test_polars_lazyframe_from_class(self):\n        class _LazyTaskClass(TaskOnKart[pl.LazyFrame]):\n            pass\n\n        self.assertEqual(get_dataframe_type_from_task(_LazyTaskClass), 'polars-lazy')\n\n    @pytest.mark.skipif(not HAS_POLARS, reason='polars not installed')\n    def test_nested_inheritance_polars_lazyframe(self):\n        class 
_BaseLazyTask(TaskOnKart[pl.LazyFrame]):\n            pass\n\n        class _DerivedLazyTask(_BaseLazyTask):\n            pass\n\n        task = _DerivedLazyTask()\n        self.assertEqual(get_dataframe_type_from_task(task), 'polars-lazy')\n\n    @pytest.mark.skipif(not HAS_POLARS, reason='polars not installed')\n    def test_nested_inheritance_polars_with_mixin(self):\n        \"\"\"Derived class with multiple bases should still detect polars through MRO.\"\"\"\n\n        class _Mixin:\n            pass\n\n        class _BasePolarsTaskWithMixin(TaskOnKart[pl.DataFrame]):\n            pass\n\n        # Multiple inheritance gives _DerivedTask its own __orig_bases__,\n        # which shadows the parent's and doesn't contain TaskOnKart[...].\n        class _DerivedTaskWithMixin(_BasePolarsTaskWithMixin, _Mixin):\n            pass\n\n        task = _DerivedTaskWithMixin()\n        self.assertEqual(get_dataframe_type_from_task(task), 'polars')\n"
  },
  {
    "path": "test/test_worker.py",
    "content": "import uuid\nfrom unittest.mock import Mock\n\nimport luigi\nimport luigi.worker\nimport pytest\nfrom luigi import scheduler\n\nimport gokart\nfrom gokart.worker import Worker, gokart_worker\n\n\nclass _DummyTask(gokart.TaskOnKart[str]):\n    task_namespace = __name__\n    random_id: luigi.StrParameter = luigi.StrParameter()\n\n    def _run(self): ...\n\n    def run(self):\n        self._run()\n        self.dump('test')\n\n\nclass TestWorkerRun:\n    def test_run(self, monkeypatch: pytest.MonkeyPatch) -> None:\n        \"\"\"Check run is called when the task is not completed\"\"\"\n        sch = scheduler.Scheduler()\n        worker = Worker(scheduler=sch)\n\n        task = _DummyTask(random_id=uuid.uuid4().hex)\n        mock_run = Mock()\n        monkeypatch.setattr(task, '_run', mock_run)\n        with worker:\n            assert worker.add(task)\n            assert worker.run()\n            mock_run.assert_called_once()\n\n\nclass _DummyTaskToCheckSkip(gokart.TaskOnKart[None]):\n    task_namespace = __name__\n\n    def _run(self): ...\n\n    def run(self):\n        self._run()\n        self.dump(None)\n\n    def complete(self) -> bool:\n        return False\n\n\nclass TestWorkerSkipIfCompletedPreRun:\n    @pytest.mark.parametrize(\n        'task_completion_check_at_run,is_completed,expect_skipped',\n        [\n            pytest.param(True, True, True, id='skipped when completed and task_completion_check_at_run is True'),\n            pytest.param(True, False, False, id='not skipped when not completed and task_completion_check_at_run is True'),\n            pytest.param(False, True, False, id='not skipped when completed and task_completion_check_at_run is False'),\n            pytest.param(False, False, False, id='not skipped when not completed and task_completion_check_at_run is False'),\n        ],\n    )\n    def test_skip_task(self, monkeypatch: pytest.MonkeyPatch, task_completion_check_at_run: bool, is_completed: bool, expect_skipped: bool) 
-> None:\n        sch = scheduler.Scheduler()\n        worker = Worker(scheduler=sch, config=gokart_worker(task_completion_check_at_run=task_completion_check_at_run))\n\n        mock_complete = Mock(return_value=is_completed)\n        # NOTE: set `complete_check_at_run=False` to avoid using deprecated skip logic.\n        task = _DummyTaskToCheckSkip(complete_check_at_run=False)\n        mock_run = Mock()\n        monkeypatch.setattr(task, '_run', mock_run)\n\n        with worker:\n            assert worker.add(task)\n            # NOTE: mock `complete` after `add` because `add` calls `complete`\n            #       to check if the task is already completed.\n            monkeypatch.setattr(task, 'complete', mock_complete)\n            assert worker.run()\n\n            if expect_skipped:\n                mock_run.assert_not_called()\n            else:\n                mock_run.assert_called_once()\n\n\nclass TestWorkerCheckCompleteValue:\n    def test_does_not_raise_for_boolean_values(self) -> None:\n        worker = Worker(scheduler=scheduler.Scheduler())\n        worker._check_complete_value(True)\n        worker._check_complete_value(False)\n\n    def test_raises_async_completion_exception_for_traceback_wrapper(self) -> None:\n        # NOTE: When Task.complete() raises in an async check, the exception is wrapped\n        #       in TracebackWrapper. This branch must raise AsyncCompletionException.\n        worker = Worker(scheduler=scheduler.Scheduler())\n        wrapped = luigi.worker.TracebackWrapper(trace='dummy traceback')\n        with pytest.raises(luigi.worker.AsyncCompletionException):\n            worker._check_complete_value(wrapped)\n\n    def test_raises_exception_for_non_boolean_value(self) -> None:\n        # NOTE: Pass a non-bool value to verify the runtime guard against a misimplemented\n        #       Task.complete() returning a non-boolean. 
The type ignore is intentional.\n        worker = Worker(scheduler=scheduler.Scheduler())\n        with pytest.raises(Exception, match='Return value of Task.complete'):\n            worker._check_complete_value('not a bool')  # type: ignore[arg-type]\n"
  },
  {
    "path": "test/test_zoned_date_second_parameter.py",
    "content": "import datetime\nimport unittest\n\nfrom luigi.cmdline_parser import CmdlineParser\n\nfrom gokart import TaskOnKart, ZonedDateSecondParameter\n\n\nclass ZonedDateSecondParameterTaskWithoutDefault(TaskOnKart[datetime.datetime]):\n    task_namespace = __name__\n    dt: ZonedDateSecondParameter = ZonedDateSecondParameter()\n\n    def run(self):\n        self.dump(self.dt)\n\n\nclass ZonedDateSecondParameterTaskWithDefault(TaskOnKart[datetime.datetime]):\n    task_namespace = __name__\n    dt: ZonedDateSecondParameter = ZonedDateSecondParameter(\n        default=datetime.datetime(2025, 2, 21, 12, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(hours=9)))\n    )\n\n    def run(self):\n        self.dump(self.dt)\n\n\nclass ZonedDateSecondParameterTest(unittest.TestCase):\n    def setUp(self):\n        self.default_datetime = datetime.datetime(2025, 2, 21, 12, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(hours=9)))\n        self.default_datetime_str = '2025-02-21T12:00:00+09:00'\n\n    def test_default(self):\n        with CmdlineParser.global_instance([f'{__name__}.ZonedDateSecondParameterTaskWithDefault']) as cp:\n            assert cp.get_task_obj().dt == self.default_datetime\n\n    def test_parse_param_with_tz_suffix(self):\n        with CmdlineParser.global_instance([f'{__name__}.ZonedDateSecondParameterTaskWithDefault', '--dt', '2024-01-20T11:00:00+09:00']) as cp:\n            assert cp.get_task_obj().dt == datetime.datetime(2024, 1, 20, 11, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(hours=9)))\n\n    def test_parse_param_with_Z_suffix(self):\n        with CmdlineParser.global_instance([f'{__name__}.ZonedDateSecondParameterTaskWithDefault', '--dt', '2024-01-20T11:00:00Z']) as cp:\n            assert cp.get_task_obj().dt == datetime.datetime(2024, 1, 20, 11, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(hours=0)))\n\n    def test_parse_param_without_timezone_input(self):\n        with 
CmdlineParser.global_instance([f'{__name__}.ZonedDateSecondParameterTaskWithoutDefault', '--dt', '2025-02-21T12:00:00']) as cp:\n            assert cp.get_task_obj().dt == datetime.datetime(2025, 2, 21, 12, 0, 0, tzinfo=None)\n\n    def test_parse_method(self):\n        actual = ZonedDateSecondParameter().parse(self.default_datetime_str)\n        expected = self.default_datetime\n        self.assertEqual(actual, expected)\n\n    def test_serialize_task(self):\n        task = ZonedDateSecondParameterTaskWithoutDefault(dt=self.default_datetime)\n        actual = str(task)\n        expected = f'(dt={self.default_datetime_str})'\n        self.assertTrue(actual.endswith(expected))\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "test/testing/__init__.py",
    "content": ""
  },
  {
    "path": "test/testing/test_pandas_assert.py",
    "content": "import unittest\n\nimport pandas as pd\n\nimport gokart\n\n\nclass TestPandasAssert(unittest.TestCase):\n    def test_assert_frame_contents_equal(self):\n        expected = pd.DataFrame(data=dict(f1=[1, 2, 3], f3=[111, 222, 333], f2=[4, 5, 6]), index=[0, 1, 2])\n        resulted = pd.DataFrame(data=dict(f2=[5, 4, 6], f1=[2, 1, 3], f3=[222, 111, 333]), index=[1, 0, 2])\n\n        gokart.testing.assert_frame_contents_equal(resulted, expected)\n\n    def test_assert_frame_contents_equal_with_small_error(self):\n        expected = pd.DataFrame(data=dict(f1=[1.0001, 2.0001, 3.0001], f3=[111, 222, 333], f2=[4, 5, 6]), index=[0, 1, 2])\n        resulted = pd.DataFrame(data=dict(f2=[5, 4, 6], f1=[2.0002, 1.0002, 3.0002], f3=[222, 111, 333]), index=[1, 0, 2])\n\n        gokart.testing.assert_frame_contents_equal(resulted, expected, atol=1e-1)\n\n    def test_assert_frame_contents_equal_with_duplicated_columns(self):\n        expected = pd.DataFrame(data=dict(f1=[1, 2, 3], f3=[111, 222, 333], f2=[4, 5, 6]), index=[0, 1, 2])\n        expected.columns = ['f1', 'f1', 'f2']\n        resulted = pd.DataFrame(data=dict(f2=[5, 4, 6], f1=[2, 1, 3], f3=[222, 111, 333]), index=[1, 0, 2])\n        resulted.columns = ['f2', 'f1', 'f1']\n\n        with self.assertRaises(AssertionError):\n            gokart.testing.assert_frame_contents_equal(resulted, expected)\n\n    def test_assert_frame_contents_equal_with_duplicated_indexes(self):\n        expected = pd.DataFrame(data=dict(f1=[1, 2, 3], f3=[111, 222, 333], f2=[4, 5, 6]), index=[0, 1, 2])\n        expected.index = [0, 1, 1]\n        resulted = pd.DataFrame(data=dict(f2=[5, 4, 6], f1=[2, 1, 3], f3=[222, 111, 333]), index=[1, 0, 2])\n        expected.index = [1, 0, 1]\n\n        with self.assertRaises(AssertionError):\n            gokart.testing.assert_frame_contents_equal(resulted, expected)\n"
  },
  {
    "path": "test/tree/__init__.py",
    "content": ""
  },
  {
    "path": "test/tree/test_task_info.py",
    "content": "from __future__ import annotations\n\nimport unittest\nfrom typing import Any\nfrom unittest.mock import patch\n\nimport luigi\nimport luigi.mock\nfrom luigi.mock import MockFileSystem, MockTarget\n\nimport gokart\nfrom gokart.tree.task_info import dump_task_info_table, dump_task_info_tree, make_task_info_as_tree_str, make_task_info_tree\n\n\nclass _SubTask(gokart.TaskOnKart[str]):\n    task_namespace = __name__\n    param: luigi.IntParameter = luigi.IntParameter()\n\n    def output(self):\n        return self.make_target('sub_task.txt')\n\n    def run(self):\n        self.dump(f'task uid = {self.make_unique_id()}')\n\n\nclass _Task(gokart.TaskOnKart[str]):\n    task_namespace = __name__\n    param: luigi.IntParameter = luigi.IntParameter(default=10)\n    sub: gokart.TaskInstanceParameter[_SubTask] = gokart.TaskInstanceParameter(default=_SubTask(param=20))\n\n    def requires(self):\n        return self.sub\n\n    def output(self):\n        return self.make_target('task.txt')\n\n    def run(self):\n        self.dump(f'task uid = {self.make_unique_id()}')\n\n\nclass _DoubleLoadSubTask(gokart.TaskOnKart[str]):\n    task_namespace = __name__\n    sub1: gokart.TaskInstanceParameter[gokart.TaskOnKart[Any]] = gokart.TaskInstanceParameter()\n    sub2: gokart.TaskInstanceParameter[gokart.TaskOnKart[Any]] = gokart.TaskInstanceParameter()\n\n    def output(self):\n        return self.make_target('sub_task.txt')\n\n    def run(self):\n        self.dump(f'task uid = {self.make_unique_id()}')\n\n\nclass TestInfo(unittest.TestCase):\n    def setUp(self) -> None:\n        MockFileSystem().clear()\n        luigi.setup_logging.DaemonLogging._configured = False\n        luigi.setup_logging.InterfaceLogging._configured = False\n\n    def tearDown(self) -> None:\n        luigi.setup_logging.DaemonLogging._configured = False\n        luigi.setup_logging.InterfaceLogging._configured = False\n\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: MockTarget(path, 
**kwargs))\n    def test_make_tree_info_pending(self):\n        task = _Task(param=1, sub=_SubTask(param=2))\n\n        # check before running\n        tree = make_task_info_as_tree_str(task)\n        expected = r\"\"\"\n└─-\\(PENDING\\) _Task\\[[a-z0-9]*\\]\n   └─-\\(PENDING\\) _SubTask\\[[a-z0-9]*\\]$\"\"\"\n        self.assertRegex(tree, expected)\n\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: MockTarget(path, **kwargs))\n    def test_make_tree_info_complete(self):\n        task = _Task(param=1, sub=_SubTask(param=2))\n\n        # check after sub task runs\n        gokart.build(task, reset_register=False)\n        tree = make_task_info_as_tree_str(task)\n        expected = r\"\"\"\n└─-\\(COMPLETE\\) _Task\\[[a-z0-9]*\\]\n   └─-\\(COMPLETE\\) _SubTask\\[[a-z0-9]*\\]$\"\"\"\n        self.assertRegex(tree, expected)\n\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: MockTarget(path, **kwargs))\n    def test_make_tree_info_abbreviation(self):\n        task = _DoubleLoadSubTask(\n            sub1=_Task(param=1, sub=_SubTask(param=2)),\n            sub2=_Task(param=1, sub=_SubTask(param=2)),\n        )\n\n        # check after sub task runs\n        gokart.build(task, reset_register=False)\n        tree = make_task_info_as_tree_str(task)\n        expected = r\"\"\"\n└─-\\(COMPLETE\\) _DoubleLoadSubTask\\[[a-z0-9]*\\]\n   \\|--\\(COMPLETE\\) _Task\\[[a-z0-9]*\\]\n   \\|  └─-\\(COMPLETE\\) _SubTask\\[[a-z0-9]*\\]\n   └─-\\(COMPLETE\\) _Task\\[[a-z0-9]*\\]\n      └─- \\.\\.\\.$\"\"\"\n        self.assertRegex(tree, expected)\n\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: MockTarget(path, **kwargs))\n    def test_make_tree_info_not_compress(self):\n        task = _DoubleLoadSubTask(\n            sub1=_Task(param=1, sub=_SubTask(param=2)),\n            sub2=_Task(param=1, sub=_SubTask(param=2)),\n        )\n\n        # check after sub task runs\n        gokart.build(task, reset_register=False)\n        tree = 
make_task_info_as_tree_str(task, abbr=False)\n        expected = r\"\"\"\n└─-\\(COMPLETE\\) _DoubleLoadSubTask\\[[a-z0-9]*\\]\n   \\|--\\(COMPLETE\\) _Task\\[[a-z0-9]*\\]\n   \\|  └─-\\(COMPLETE\\) _SubTask\\[[a-z0-9]*\\]\n   └─-\\(COMPLETE\\) _Task\\[[a-z0-9]*\\]\n      └─-\\(COMPLETE\\) _SubTask\\[[a-z0-9]*\\]$\"\"\"\n        self.assertRegex(tree, expected)\n\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: MockTarget(path, **kwargs))\n    def test_make_tree_info_not_compress_ignore_task(self):\n        task = _DoubleLoadSubTask(\n            sub1=_Task(param=1, sub=_SubTask(param=2)),\n            sub2=_Task(param=1, sub=_SubTask(param=2)),\n        )\n\n        # check after sub task runs\n        gokart.build(task, reset_register=False)\n        tree = make_task_info_as_tree_str(task, abbr=False, ignore_task_names=['_Task'])\n        expected = r\"\"\"\n└─-\\(COMPLETE\\) _DoubleLoadSubTask\\[[a-z0-9]*\\]$\"\"\"\n        self.assertRegex(tree, expected)\n\n    @patch('luigi.LocalTarget', new=lambda path, **kwargs: MockTarget(path, **kwargs))\n    def test_make_tree_info_with_cache(self):\n        task = _DoubleLoadSubTask(\n            sub1=_Task(param=1, sub=_SubTask(param=2)),\n            sub2=_Task(param=1, sub=_SubTask(param=2)),\n        )\n\n        # check child task_info is the same object\n        tree = make_task_info_tree(task)\n        self.assertTrue(tree.children_task_infos[0] is tree.children_task_infos[1])\n\n\nclass _TaskInfoExampleTaskA(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n\n\nclass _TaskInfoExampleTaskB(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n\n\nclass _TaskInfoExampleTaskC(gokart.TaskOnKart[str]):\n    task_namespace = __name__\n\n    def requires(self):\n        return dict(taskA=_TaskInfoExampleTaskA(), taskB=_TaskInfoExampleTaskB())\n\n    def run(self):\n        self.dump('DONE')\n\n\nclass TestTaskInfoTable(unittest.TestCase):\n    def test_dump_task_info_table(self):\n        with 
patch('gokart.target.SingleFileTarget.dump') as mock_obj:\n            self.dumped_data: Any = None\n\n            def _side_effect(obj, lock_at_dump):\n                self.dumped_data = obj\n\n            mock_obj.side_effect = _side_effect\n            dump_task_info_table(task=_TaskInfoExampleTaskC(), task_info_dump_path='path.csv', ignore_task_names=['_TaskInfoExampleTaskB'])\n\n            self.assertEqual(set(self.dumped_data['name']), {'_TaskInfoExampleTaskA', '_TaskInfoExampleTaskC'})\n            self.assertEqual(\n                set(self.dumped_data.columns), {'name', 'unique_id', 'output_paths', 'params', 'processing_time', 'is_complete', 'task_log', 'requires'}\n            )\n\n\nclass TestTaskInfoTree(unittest.TestCase):\n    def test_dump_task_info_tree(self):\n        with patch('gokart.target.SingleFileTarget.dump') as mock_obj:\n            self.dumped_data: Any = None\n\n            def _side_effect(obj, lock_at_dump):\n                self.dumped_data = obj\n\n            mock_obj.side_effect = _side_effect\n            dump_task_info_tree(task=_TaskInfoExampleTaskC(), task_info_dump_path='path.pkl', ignore_task_names=['_TaskInfoExampleTaskB'])\n\n            self.assertEqual(self.dumped_data.name, '_TaskInfoExampleTaskC')\n            self.assertEqual(self.dumped_data.children_task_infos[0].name, '_TaskInfoExampleTaskA')\n\n            self.assertEqual(self.dumped_data.requires.keys(), {'taskA', 'taskB'})\n            self.assertEqual(self.dumped_data.requires['taskA'].name, '_TaskInfoExampleTaskA')\n            self.assertEqual(self.dumped_data.requires['taskB'].name, '_TaskInfoExampleTaskB')\n\n    def test_dump_task_info_tree_with_invalid_path_extention(self):\n        with patch('gokart.target.SingleFileTarget.dump') as mock_obj:\n            self.dumped_data = None\n\n            def _side_effect(obj, lock_at_dump):\n                self.dumped_data = obj\n\n            mock_obj.side_effect = _side_effect\n            with 
self.assertRaises(AssertionError):\n                dump_task_info_tree(task=_TaskInfoExampleTaskC(), task_info_dump_path='path.csv', ignore_task_names=['_TaskInfoExampleTaskB'])\n"
  },
  {
    "path": "test/tree/test_task_info_formatter.py",
    "content": "import unittest\nfrom typing import Any\n\nimport gokart\nfrom gokart.tree.task_info_formatter import RequiredTask, _make_requires_info\n\n\nclass _RequiredTaskExampleTaskA(gokart.TaskOnKart[Any]):\n    task_namespace = __name__\n\n\nclass TestMakeRequiresInfo(unittest.TestCase):\n    def test_make_requires_info_with_task_on_kart(self):\n        requires = _RequiredTaskExampleTaskA()\n        resulted = _make_requires_info(requires=requires)\n        expected = RequiredTask(name=requires.__class__.__name__, unique_id=requires.make_unique_id())\n        self.assertEqual(resulted, expected)\n\n    def test_make_requires_info_with_list(self):\n        requires = [_RequiredTaskExampleTaskA()]\n        resulted = _make_requires_info(requires=requires)\n        expected = [RequiredTask(name=require.__class__.__name__, unique_id=require.make_unique_id()) for require in requires]\n        self.assertEqual(resulted, expected)\n\n    def test_make_requires_info_with_generator(self):\n        def _requires_gen():\n            return (_RequiredTaskExampleTaskA() for _ in range(2))\n\n        resulted = _make_requires_info(requires=_requires_gen())\n        expected = [RequiredTask(name=require.__class__.__name__, unique_id=require.make_unique_id()) for require in _requires_gen()]\n        self.assertEqual(resulted, expected)\n\n    def test_make_requires_info_with_dict(self):\n        requires = dict(taskA=_RequiredTaskExampleTaskA())\n        resulted = _make_requires_info(requires=requires)\n        expected = {key: RequiredTask(name=require.__class__.__name__, unique_id=require.make_unique_id()) for key, require in requires.items()}\n        self.assertEqual(resulted, expected)\n\n    def test_make_requires_info_with_invalid(self):\n        requires = [1, 2]\n        with self.assertRaises(TypeError):\n            _make_requires_info(requires=requires)\n"
  },
  {
    "path": "test/util.py",
    "content": "import os\nimport uuid\n\n\n# TODO: use pytest.fixture to share this functionality with other tests\ndef _get_temporary_directory():\n    _uuid = str(uuid.uuid4())\n    return os.path.abspath(os.path.join(os.path.dirname(__name__), f'temporary-{_uuid}'))\n"
  },
  {
    "path": "tox.ini",
    "content": "[tox]\nenvlist = py{310,311,312,313,314},ruff,mypy\nskipsdist = True\n\n[testenv]\nrunner = uv-venv-lock-runner\ndependency_groups = test\ncommands =\n  {envpython} -m pytest --cov=gokart --cov-report=xml -vv {posargs:}\n\n[testenv:ruff]\ndependency_groups = lint\ncommands =\n  ruff check {posargs:}\n  ruff format --check {posargs:}\n\n[testenv:mypy]\ndependency_groups = lint\ncommands = mypy gokart test {posargs:}\n"
  }
]