[
  {
    "path": ".editorconfig",
    "content": "root = true\n\n[*]\ncharset = utf-8\nindent_style = space\nindent_size = 4\ninsert_final_newline = true\nend_of_line = lf\n\n[*.{yml,yaml}]\nindent_size = 2\n"
  },
  {
    "path": ".github/workflows/main.yml",
    "content": "name: tests\n\non: [push, pull_request]\n\nenv:\n  # Environment variables to support color support (jaraco/skeleton#66):\n  # Request colored output from CLI tools supporting it. Different tools\n  # interpret the value differently. For some, just being set is sufficient.\n  # For others, it must be a non-zero integer. For yet others, being set\n  # to a non-empty value is sufficient.\n  FORCE_COLOR: -106\n  # MyPy's color enforcement (must be a non-zero number)\n  MYPY_FORCE_COLOR: -42\n  # Recognized by the `py` package, dependency of `pytest` (must be \"1\")\n  PY_COLORS: 1\n  # Make tox-wrapped tools see color requests\n  TOX_TESTENV_PASSENV: >-\n    FORCE_COLOR\n    MYPY_FORCE_COLOR\n    NO_COLOR\n    PY_COLORS\n    PYTEST_THEME\n    PYTEST_THEME_MODE\n\n  # Suppress noisy pip warnings\n  PIP_DISABLE_PIP_VERSION_CHECK: 'true'\n  PIP_NO_PYTHON_VERSION_WARNING: 'true'\n  PIP_NO_WARN_SCRIPT_LOCATION: 'true'\n\n  # Disable the spinner, noise in GHA; TODO(webknjaz): Fix this upstream\n  # Must be \"1\".\n  TOX_PARALLEL_NO_SPINNER: 1\n\n\njobs:\n  test:\n    strategy:\n      matrix:\n        python:\n        - \"3.12\"\n        dev:\n        - -dev\n        platform:\n        - ubuntu-latest\n        - macos-latest\n        - windows-latest\n        include:\n        - python: \"3.12\"\n          platform: ubuntu-latest\n    runs-on: ${{ matrix.platform }}\n    steps:\n      - uses: actions/checkout@v3\n      - name: Setup Python\n        uses: actions/setup-python@v4\n        with:\n          python-version: ${{ matrix.python }}${{ matrix.dev }}\n      - name: Install dependencies\n        run: |\n          python -m pip install -r requirements/local.txt\n      - name: Run tests\n        run:\n          pytest --cov=openai_cli src/openai_cli\n\n  check:  # This job does nothing and is only used for the branch protection\n    if: always()\n\n    needs:\n    - test\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - name: Decide whether the needed 
jobs succeeded or failed\n      uses: re-actors/alls-green@release/v1\n      with:\n        jobs: ${{ toJSON(needs) }}\n\n  release:\n    needs:\n    - check\n    if: github.event_name == 'push' && contains(github.ref, 'refs/tags/')\n    runs-on: ubuntu-latest\n\n    steps:\n      - uses: actions/checkout@v3\n      - name: Setup Python\n        uses: actions/setup-python@v4\n        with:\n          python-version: 3.12\n      - name: Install tox\n        run: |\n          python -m pip install tox\n      - name: Release\n        run: tox -e release\n        env:\n          TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n"
  },
  {
    "path": ".gitignore",
    "content": "Session.vim\n*.swp\n*.egg-info/\n__pycache__/\n/build/\n/dist/\n.coverage\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "repos:\n  - repo: local\n    hooks:\n    - id: isort\n      name: isort\n      entry: isort\n      language: system\n      require_serial: true\n      types_or: [cython, pyi, python]\n      args: ['--filter-files']\n\n    - id: black\n      name: black\n      entry: black\n      language: system\n      require_serial: true\n      types_or: [python, pyi]\n"
  },
  {
    "path": ".readthedocs.yaml",
    "content": "version: 2\npython:\n  install:\n  - path: .\n    extra_requirements:\n      - docs\n\n# workaround for readthedocs/readthedocs.org#9623\nbuild:\n  # workaround for readthedocs/readthedocs.org#9635\n  os: ubuntu-22.04\n  tools:\n    python: \"3\"\n"
  },
  {
    "path": "CHANGES.rst",
    "content": "1.0.0 - Sep 4, 2024\n-------------------\n\n* Complete rewrite by `Tevfik Kadan`_, `PR #13`_.\n\n.. _`Tevfik Kadan`: https://github.com/ktevfik\n.. _`PR #13`: https://github.com/peterdemin/openai-cli/pull/13\n\n0.0.3 - Feb 15, 2023\n--------------------\n\n* Allow overriding API URL through ``OPENAI_API_URL`` environment variable.\n  Thanks to `Stefano d'Antonio`_, `Issue #5`_, `PR #6`_.\n\n.. _`Stefano d'Antonio`: https://github.com/UnoSD\n.. _`Issue #5`: https://github.com/peterdemin/openai-cli/issues/5\n.. _`PR #6`: https://github.com/peterdemin/openai-cli/pull/6\n\n0.0.2 - Dec 29, 2022\n--------------------\n\n* Add command line option -m/--model. Thanks to `Alex Zhuang`_, `PR #1`_.\n\n.. _`Alex Zhuang`: https://github.com/azhx\n.. _`PR #1`: https://github.com/peterdemin/openai-cli/pull/1\n\n0.0.1 - Dec 3, 2022\n-------------------\n\n* Initial release\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright Jason R. Coombs\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n"
  },
  {
    "path": "Makefile",
    "content": ".DEFAULT_GOAL := help\n\nPEX := openai\nPROJ := openai_cli\nPROJ_ROOT := src/$(PROJ)\n\ndefine PRINT_HELP_PYSCRIPT\nimport re, sys\n\nfor line in sys.stdin:\n\tmatch = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line)\n\tif match:\n\t\ttarget, help = match.groups()\n\t\tprint(\"%-10s %s\" % (target, help))\nendef\nexport PRINT_HELP_PYSCRIPT\n\n.PHONY: virtual_env_set\nvirtual_env_set:\nifndef VIRTUAL_ENV\n\t$(error VIRTUAL_ENV not set)\nendif\n\n.PHONY: help\nhelp:\n\t@python -c \"$$PRINT_HELP_PYSCRIPT\" < $(MAKEFILE_LIST)\n\n.PHONY: clean\nclean: ## remove build artifacts\n\trm -rf build/ \\\n\t       dist/ \\\n\t       .eggs/\n\trm -f $(PEX)\n\tfind . -name '.eggs' -type d -exec rm -rf {} +\n\tfind . -name '*.egg-info' -exec rm -rf {} +\n\tfind . -name '*.egg' -exec rm -f {} +\n\tfind . -name '*.pyc' -exec rm -f {} +\n\tfind . -name '*.pyo' -exec rm -f {} +\n\tfind . -name '__pycache__' -exec rm -fr {} +\n\n.PHONY: dist\ndist: clean ## builds source and wheel package\n\tpython -m build -n\n\n.PHONY: release\nrelease: dist ## package and upload a release\n\ttwine upload dist/*\n\n$(PEX) pex:\n\tpex . -e $(PROJ).cli:cli --validate-entry-point -o $(PEX)\n\n.PHONY: lint\nlint: ## check style with pylint\n\tpylint $(PROJ_ROOT)\n\tmypy $(PROJ_ROOT)\n\n.PHONY: test\ntest: ## run test suite\n\tpytest --cov=$(PROJ) $(PROJ_ROOT)\n\n.PHONY: install\ninstall: ## install the package with dev dependencies\n\tpip install -e . 
-r requirements/local.txt\n\n.PHONY: sync\nsync: ## completely sync installed packages with dev dependencies\n\tpip-sync requirements/local.txt\n\tpip install -e .\n\n.PHONY: lock\nlock: ## lock versions of third-party dependencies\n\tpip-compile-multi \\\n\t\t--allow-unsafe \\\n\t\t--use-cache \\\n\t\t--no-upgrade\n\n.PHONY: upgrade\nupgrade: ## upgrade versions of third-party dependencies\n\tpip-compile-multi \\\n\t\t--allow-unsafe \\\n\t\t--use-cache\n\n.PHONY: fmt\nfmt: ## Reformat all Python files\n\tisort $(PROJ_ROOT)\n\tblack $(PROJ_ROOT)\n\n## Skeleton initialization\n.PHONY: init\ninit: virtual_env_set install\n\tpre-commit install\n\n.PHONY: rename\nrename:\n\t@python -c \"$$RENAME_PROJECT_PYSCRIPT\"\n\t$(MAKE) init\n\tgit add -A .\n\tgit commit -am \"Initialize the project\"\n"
  },
  {
    "path": "README.rst",
    "content": "OpenAI command-line client\n==========================\n\nInstallation\n------------\n\nTo install OpenAI CLI in Python virtual environment, run::\n\n    pip install openai-cli\n\nToken authentication\n--------------------\n\nOpenAI API requires authentication token, which can be obtained on this page:\nhttps://beta.openai.com/account/api-keys\n\nProvide token to the CLI either through a command-line argument (``-t/--token <TOKEN>``)\nor through an environment variable (``OPENAI_API_KEY``).\n\nUsage\n-----\n\nCurrently only text completion API is supported.\n\nExample usage::\n\n    $ echo \"Are cats faster than dogs?\" | openai complete -\n    It depends on the breed of the cat and dog. Generally,\n    cats are faster than dogs over short distances,\n    but dogs are better at sustained running.\n\nInteractive mode supported (Press Ctrl+C to exit)::\n\n    $ openai repl\n    Prompt: Can generative AI replace humans?\n\n    No, generative AI cannot replace humans.\n    While generative AI can be used to automate certain tasks,\n    it cannot replace the creativity, intuition, and problem-solving\n    skills that humans possess.\n    Generative AI can be used to supplement human efforts,\n    but it cannot replace them.\n\n    Prompt: ^C\n\nRun without arguments to get a short help message::\n\n    $ openai\n    Usage: openai [OPTIONS] COMMAND [ARGS]...\n\n    Options:\n      --help  Show this message and exit.\n\n    Commands:\n      complete  Return OpenAI completion for a prompt from SOURCE.\n      repl      Start interactive shell session for OpenAI completion API.\n\nBuild a standalone binary using pex and move it into PATH::\n\n    $ make openai && mv openai ~/bin/\n    $ openai repl\n    Prompt:\n\nAlternative API URL\n-------------------\n\nCLI invokes https://api.openai.com/v1/completions by default.\nTo override the endpoint URL, set ``OPENAI_API_URL`` environment variable.\n\nExample usage\n-------------\n\nHere's an example usage 
scenario, where we first create a Python module\nwith a Fibonacci function implementation, and then generate a unit test for it:\n\n.. code:: bash\n\n    $ mkdir examples\n    $ touch examples/__init__.py\n    $ echo \"Write Python function to calculate Fibonacci numbers\" | openai complete - | black - > examples/fib.py\n    $ (echo 'Write unit tests for this Python module named \"fib\":\\n'; cat examples/fib.py) | openai complete - | black - > examples/test_fib.py\n    $ pytest -v examples/test_fib.py\n    ============================== test session starts ==============================\n\n    examples/test_fib.py::TestFibonacci::test_eighth_fibonacci_number PASSED                                 [ 10%]\n    examples/test_fib.py::TestFibonacci::test_fifth_fibonacci_number PASSED                                  [ 20%]\n    examples/test_fib.py::TestFibonacci::test_first_fibonacci_number PASSED                                  [ 30%]\n    examples/test_fib.py::TestFibonacci::test_fourth_fibonacci_number PASSED                                 [ 40%]\n    examples/test_fib.py::TestFibonacci::test_negative_input PASSED                                          [ 50%]\n    examples/test_fib.py::TestFibonacci::test_ninth_fibonacci_number PASSED                                  [ 60%]\n    examples/test_fib.py::TestFibonacci::test_second_fibonacci_number PASSED                                 [ 70%]\n    examples/test_fib.py::TestFibonacci::test_seventh_fibonacci_number PASSED                                [ 80%]\n    examples/test_fib.py::TestFibonacci::test_sixth_fibonacci_number PASSED                                  [ 90%]\n    examples/test_fib.py::TestFibonacci::test_third_fibonacci_number PASSED                                  [100%]\n\n    =============================== 10 passed in 0.02s ==============================\n\n    $ cat examples/fib.py\n\n.. 
code:: python\n\n    def Fibonacci(n):\n        if n < 0:\n            print(\"Incorrect input\")\n        # First Fibonacci number is 0\n        elif n == 1:\n            return 0\n        # Second Fibonacci number is 1\n        elif n == 2:\n            return 1\n        else:\n            return Fibonacci(n - 1) + Fibonacci(n - 2)\n\n.. code:: bash\n\n    $ cat examples/test_fib.py\n\n.. code:: python\n\n    import unittest\n    from .fib import Fibonacci\n\n\n    class TestFibonacci(unittest.TestCase):\n        def test_negative_input(self):\n            self.assertEqual(Fibonacci(-1), None)\n\n        def test_first_fibonacci_number(self):\n            self.assertEqual(Fibonacci(1), 0)\n\n        def test_second_fibonacci_number(self):\n            self.assertEqual(Fibonacci(2), 1)\n\n        def test_third_fibonacci_number(self):\n            self.assertEqual(Fibonacci(3), 1)\n\n        def test_fourth_fibonacci_number(self):\n            self.assertEqual(Fibonacci(4), 2)\n\n        def test_fifth_fibonacci_number(self):\n            self.assertEqual(Fibonacci(5), 3)\n\n        def test_sixth_fibonacci_number(self):\n            self.assertEqual(Fibonacci(6), 5)\n\n        def test_seventh_fibonacci_number(self):\n            self.assertEqual(Fibonacci(7), 8)\n\n        def test_eighth_fibonacci_number(self):\n            self.assertEqual(Fibonacci(8), 13)\n\n        def test_ninth_fibonacci_number(self):\n            self.assertEqual(Fibonacci(9), 21)\n\n\n    if __name__ == \"__main__\":\n        unittest.main()\n\n.. code:: bash\n\n    $ (echo \"Add type annotations for this Python code\"; cat examples/fib.py) | openai complete - | black - | tee tmp && mv tmp examples/fib.py\n\n.. 
code:: python\n\n    def Fibonacci(n: int) -> int:\n        if n < 0:\n            print(\"Incorrect input\")\n        # First Fibonacci number is 0\n        elif n == 1:\n            return 0\n        # Second Fibonacci number is 1\n        elif n == 2:\n            return 1\n        else:\n            return Fibonacci(n - 1) + Fibonacci(n - 2)\n\n.. code:: bash\n\n    $ mypy examples/fib.py\n    examples/fib.py:1: error: Missing return statement  [return]\n    Found 1 error in 1 file (checked 1 source file)\n\n.. code:: bash\n\n    $ (echo \"Fix mypy warnings in this Python code\"; cat examples/fib.py; mypy examples/fib.py) | openai complete - | black - | tee tmp && mv tmp examples/fib.py\n\n.. code:: python\n\n    def Fibonacci(n: int) -> int:\n        if n < 0:\n            print(\"Incorrect input\")\n        # First Fibonacci number is 0\n        elif n == 1:\n            return 0\n        # Second Fibonacci number is 1\n        elif n == 2:\n            return 1\n        else:\n            return Fibonacci(n - 1) + Fibonacci(n - 2)\n        return None  # Added return statement\n\n.. code:: bash\n\n    $ mypy examples/fib.py\n    examples/fib.py:12: error: Incompatible return value type (got \"None\", expected \"int\")  [return-value]\n    Found 1 error in 1 file (checked 1 source file)\n\n.. code:: bash\n\n    $ (echo \"Fix mypy warnings in this Python code\"; cat examples/fib.py; mypy examples/fib.py) | openai complete - | black - | tee tmp && mv tmp examples/fib.py\n\n.. code:: python\n\n    def Fibonacci(n: int) -> int:\n        if n < 0:\n            print(\"Incorrect input\")\n        # First Fibonacci number is 0\n        elif n == 1:\n            return 0\n        # Second Fibonacci number is 1\n        elif n == 2:\n            return 1\n        else:\n            return Fibonacci(n - 1) + Fibonacci(n - 2)\n        return 0  # Changed return statement to return 0\n\n.. 
code:: bash\n\n    $ mypy examples/fib.py\n    Success: no issues found in 1 source file\n\n.. code:: bash\n\n    $ (echo \"Rewrite these tests to use pytest.parametrized\"; cat examples/test_fib.py) | openai complete - | black - | tee tmp && mv tmp examples/test_fib.py\n\n.. code:: python\n\n    import pytest\n    from .fib import Fibonacci\n\n\n    @pytest.mark.parametrize(\n        \"n, expected\",\n        [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (6, 5), (7, 8), (8, 13), (9, 21), (10, 34)],\n    )\n    def test_fibonacci(n, expected):\n        assert Fibonacci(n) == expected\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nrequires = [\"setuptools>=56\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[tool.black]\nline-length = 100\ntarget-version = ['py312']\n\n[tool.isort]\nprofile = 'black'\nmulti_line_output = 3\ninclude_trailing_comma = true\nforce_grid_wrap = 0\nuse_parentheses = true\nline_length = 100\n\n[tool.flake8]\nmax-line-length = 100\nmax-complexity = 10\n\n[tool.pylint.main]\n# Analyse import fallback blocks. This can be used to support both Python 2 and 3\n# compatible code, which means that the block might have code that exists only in\n# one or another interpreter, leading to false positives when analysed.\n# analyse-fallback-blocks =\n\n# Always return a 0 (non-error) status code, even if lint errors are found. This\n# is primarily useful in continuous integration scripts.\n# exit-zero =\n\n# A comma-separated list of package or module names from where C extensions may\n# be loaded. Extensions are loading into the active Python interpreter and may\n# run arbitrary code.\nextension-pkg-allow-list = \"lxml\"\n\n# A comma-separated list of package or module names from where C extensions may\n# be loaded. Extensions are loading into the active Python interpreter and may\n# run arbitrary code. (This is an alternative name to extension-pkg-allow-list\n# for backward compatibility.)\n# extension-pkg-whitelist =\n\n# Return non-zero exit code if any of these messages/categories are detected,\n# even if score is above --fail-under value. Syntax same as enable. Messages\n# specified are enabled, while categories only check already-enabled messages.\n# fail-on =\n\n# Specify a score threshold under which the program will exit with error.\nfail-under = 10\n\n# Interpret the stdin as a python script, whose filename needs to be passed as\n# the module_or_package argument.\n# from-stdin =\n\n# Files or directories to be skipped. 
They should be base names, not paths.\nignore = [\"CVS\"]\n\n# Add files or directories matching the regular expressions patterns to the\n# ignore-list. The regex matches against paths and can be in Posix or Windows\n# format. Because '\\' represents the directory delimiter on Windows systems, it\n# can't be used as an escape character.\n# ignore-paths =\n\n# Files or directories matching the regular expression patterns are skipped. The\n# regex matches against base names, not paths. The default value ignores Emacs\n# file locks\nignore-patterns = [\"^\\\\.#\"]\n\n# List of module names for which member attributes should not be checked (useful\n# for modules/projects where namespaces are manipulated during runtime and thus\n# existing member attributes cannot be deduced by static analysis). It supports\n# qualified module names, as well as Unix pattern matching.\n# ignored-modules =\n\n# Python code to execute, usually for sys.path manipulation such as\n# pygtk.require().\n# init-hook =\n\n# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the\n# number of processors available to use, and will cap the count on Windows to\n# avoid hangs.\njobs = 1\n\n# Control the amount of potential inferred values when inferring a single object.\n# This can help the performance when dealing with large functions or complex,\n# nested conditions.\nlimit-inference-results = 100\n\n# List of plugins (as comma separated values of python module names) to load,\n# usually to register additional checkers.\n# load-plugins =\n\n# Pickle collected data for later comparisons.\npersistent = true\n\n# Minimum Python version to use for version dependent checks. 
Will default to the\n# version used to run pylint.\npy-version = \"3.12\"\n\n# Discover python modules and packages in the file system subtree.\n# recursive =\n\n# When enabled, pylint would attempt to guess common misconfiguration and emit\n# user-friendly hints instead of false-positive error messages.\nsuggestion-mode = true\n\n# Allow loading of arbitrary C extensions. Extensions are imported into the\n# active Python interpreter and may run arbitrary code.\n# unsafe-load-any-extension =\n\n[tool.pylint.basic]\n# Naming style matching correct argument names.\nargument-naming-style = \"snake_case\"\n\n# Regular expression matching correct argument names. Overrides argument-naming-\n# style. If left empty, argument names will be checked with the set naming style.\n# argument-rgx =\n\n# Naming style matching correct attribute names.\nattr-naming-style = \"snake_case\"\n\n# Regular expression matching correct attribute names. Overrides attr-naming-\n# style. If left empty, attribute names will be checked with the set naming\n# style.\n# attr-rgx =\n\n# Bad variable names which should always be refused, separated by a comma.\nbad-names = [\"foo\", \"bar\", \"baz\", \"toto\", \"tutu\", \"tata\"]\n\n# Bad variable names regexes, separated by a comma. If names match any regex,\n# they will always be refused\n# bad-names-rgxs =\n\n# Naming style matching correct class attribute names.\nclass-attribute-naming-style = \"any\"\n\n# Regular expression matching correct class attribute names. Overrides class-\n# attribute-naming-style. If left empty, class attribute names will be checked\n# with the set naming style.\n# class-attribute-rgx =\n\n# Naming style matching correct class constant names.\nclass-const-naming-style = \"UPPER_CASE\"\n\n# Regular expression matching correct class constant names. Overrides class-\n# const-naming-style. 
If left empty, class constant names will be checked with\n# the set naming style.\n# class-const-rgx =\n\n# Naming style matching correct class names.\nclass-naming-style = \"PascalCase\"\n\n# Regular expression matching correct class names. Overrides class-naming-style.\n# If left empty, class names will be checked with the set naming style.\n# class-rgx =\n\n# Naming style matching correct constant names.\nconst-naming-style = \"UPPER_CASE\"\n\n# Regular expression matching correct constant names. Overrides const-naming-\n# style. If left empty, constant names will be checked with the set naming style.\n# const-rgx =\n\n# Minimum line length for functions/classes that require docstrings, shorter ones\n# are exempt.\ndocstring-min-length = -1\n\n# Naming style matching correct function names.\nfunction-naming-style = \"snake_case\"\n\n# Regular expression matching correct function names. Overrides function-naming-\n# style. If left empty, function names will be checked with the set naming style.\n# function-rgx =\n\n# Good variable names which should always be accepted, separated by a comma.\ngood-names = [\"i\", \"j\", \"k\", \"ex\", \"Run\", \"_\"]\n\n# Good variable names regexes, separated by a comma. If names match any regex,\n# they will always be accepted\n# good-names-rgxs =\n\n# Include a hint for the correct naming format with invalid-name.\n# include-naming-hint =\n\n# Naming style matching correct inline iteration names.\ninlinevar-naming-style = \"any\"\n\n# Regular expression matching correct inline iteration names. Overrides\n# inlinevar-naming-style. If left empty, inline iteration names will be checked\n# with the set naming style.\n# inlinevar-rgx =\n\n# Naming style matching correct method names.\nmethod-naming-style = \"snake_case\"\n\n# Regular expression matching correct method names. Overrides method-naming-\n# style. 
If left empty, method names will be checked with the set naming style.\n# method-rgx =\n\n# Naming style matching correct module names.\nmodule-naming-style = \"snake_case\"\n\n# Regular expression matching correct module names. Overrides module-naming-\n# style. If left empty, module names will be checked with the set naming style.\n# module-rgx =\n\n# Colon-delimited sets of names that determine each other's naming style when the\n# name regexes allow several styles.\n# name-group =\n\n# Regular expression which should only match function or class names that do not\n# require a docstring.\nno-docstring-rgx = \"^_\"\n\n# List of decorators that produce properties, such as abc.abstractproperty. Add\n# to this list to register other decorators that produce valid properties. These\n# decorators are taken in consideration only for invalid-name.\nproperty-classes = [\"abc.abstractproperty\"]\n\n# Regular expression matching correct type variable names. If left empty, type\n# variable names will be checked with the set naming style.\n# typevar-rgx =\n\n# Naming style matching correct variable names.\nvariable-naming-style = \"snake_case\"\n\n# Regular expression matching correct variable names. Overrides variable-naming-\n# style. If left empty, variable names will be checked with the set naming style.\n# variable-rgx =\n\n[tool.pylint.classes]\n# Warn about protected attribute access inside special methods\n# check-protected-access-in-special-methods =\n\n# List of method names used to declare (i.e. 
assign) instance attributes.\ndefining-attr-methods = [\"__init__\", \"__new__\", \"setUp\", \"__post_init__\"]\n\n# List of member names, which should be excluded from the protected access\n# warning.\nexclude-protected = [\"_asdict\", \"_fields\", \"_replace\", \"_source\", \"_make\"]\n\n# List of valid names for the first argument in a class method.\nvalid-classmethod-first-arg = [\"cls\"]\n\n# List of valid names for the first argument in a metaclass class method.\nvalid-metaclass-classmethod-first-arg = [\"cls\"]\n\n[tool.pylint.design]\n# List of regular expressions of class ancestor names to ignore when counting\n# public methods (see R0903)\n# exclude-too-few-public-methods =\n\n# List of qualified class names to ignore when counting class parents (see R0901)\n# ignored-parents =\n\n# Maximum number of arguments for function / method.\nmax-args = 5\n\n# Maximum number of attributes for a class (see R0902).\nmax-attributes = 7\n\n# Maximum number of boolean expressions in an if statement (see R0916).\nmax-bool-expr = 5\n\n# Maximum number of branch for function / method body.\nmax-branches = 12\n\n# Maximum number of locals for function / method body.\nmax-locals = 15\n\n# Maximum number of parents for a class (see R0901).\nmax-parents = 7\n\n# Maximum number of public methods for a class (see R0904).\nmax-public-methods = 20\n\n# Maximum number of return / yield for function / method body.\nmax-returns = 6\n\n# Maximum number of statements in function / method body.\nmax-statements = 50\n\n# Minimum number of public methods for a class (see R0903).\nmin-public-methods = 2\n\n[tool.pylint.exceptions]\n# Exceptions that will emit a warning when caught.\novergeneral-exceptions = [\"BaseException\", \"Exception\"]\n\n[tool.pylint.format]\n# Expected format of line ending, e.g. 
empty (any line ending), LF or CRLF.\n# expected-line-ending-format =\n\n# Regexp for a line that is allowed to be longer than the limit.\nignore-long-lines = \"^\\\\s*(# )?<?https?://\\\\S+>?$\"\n\n# Number of spaces of indent required inside a hanging or continued line.\nindent-after-paren = 4\n\n# String used as indentation unit. This is usually \"    \" (4 spaces) or \"\\t\" (1\n# tab).\nindent-string = \"    \"\n\n# Maximum number of characters on a single line.\nmax-line-length = 100\n\n# Maximum number of lines in a module.\nmax-module-lines = 1000\n\n# Allow the body of a class to be on the same line as the declaration if body\n# contains single statement.\n# single-line-class-stmt =\n\n# Allow the body of an if to be on the same line as the test if there is no else.\n# single-line-if-stmt =\n\n[tool.pylint.imports]\n# List of modules that can be imported at any level, not just the top level one.\n# allow-any-import-level =\n\n# Allow wildcard imports from modules that define __all__.\n# allow-wildcard-with-all =\n\n# Deprecated modules which should not be used, separated by a comma.\n# deprecated-modules =\n\n# Output a graph (.gv or any supported image format) of external dependencies to\n# the given file (report RP0402 must not be disabled).\n# ext-import-graph =\n\n# Output a graph (.gv or any supported image format) of all (i.e. 
internal and\n# external) dependencies to the given file (report RP0402 must not be disabled).\n# import-graph =\n\n# Output a graph (.gv or any supported image format) of internal dependencies to\n# the given file (report RP0402 must not be disabled).\n# int-import-graph =\n\n# Force import order to recognize a module as part of the standard compatibility\n# libraries.\n# known-standard-library =\n\n# Force import order to recognize a module as part of a third party library.\nknown-third-party = [\"enchant\"]\n\n# Couples of modules and preferred modules, separated by a comma.\n# preferred-modules =\n\n[tool.pylint.logging]\n# The type of string formatting that logging methods do. `old` means using %\n# formatting, `new` is for `{}` formatting.\nlogging-format-style = \"old\"\n\n# Logging modules to check that the string format arguments are in logging\n# function parameter format.\nlogging-modules = [\"logging\"]\n\n[tool.pylint.\"messages control\"]\n# Only show warnings with the listed confidence levels. Leave empty to show all.\n# Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, UNDEFINED.\nconfidence = [\"HIGH\", \"CONTROL_FLOW\", \"INFERENCE\", \"INFERENCE_FAILURE\", \"UNDEFINED\"]\n\n# Disable the message, report, category or checker with the given id(s). You can\n# either give multiple identifiers separated by comma (,) or put this option\n# multiple times (only on the command line, not in the configuration file where\n# it should appear only once). You can also use \"--disable=all\" to disable\n# everything first and then re-enable specific checks. For example, if you want\n# to run only the similarities checker, you can use \"--disable=all\n# --enable=similarities\". 
If you want to run only the classes checker, but have\n# no Warning level messages displayed, use \"--disable=all --enable=classes\n# --disable=W\".\ndisable = [\"raw-checker-failed\", \"bad-inline-option\", \"locally-disabled\", \"file-ignored\", \"suppressed-message\", \"useless-suppression\", \"deprecated-pragma\", \"use-symbolic-message-instead\", \"missing-function-docstring\", \"missing-module-docstring\", \"missing-class-docstring\", \"too-few-public-methods\", \"fixme\", \"duplicate-code\"]\n\n# Enable the message, report, category or checker with the given id(s). You can\n# either give multiple identifier separated by comma (,) or put this option\n# multiple time (only on the command line, not in the configuration file where it\n# should appear only once). See also the \"--disable\" option for examples.\nenable = [\"c-extension-no-member\"]\n\n[tool.pylint.method_args]\n# List of qualified names (i.e., library.method) which require a timeout\n# parameter e.g. 'requests.api.get,requests.api.post'\ntimeout-methods = [\"requests.api.delete\", \"requests.api.get\", \"requests.api.head\", \"requests.api.options\", \"requests.api.patch\", \"requests.api.post\", \"requests.api.put\", \"requests.api.request\"]\n\n[tool.pylint.miscellaneous]\n# List of note tags to take in consideration, separated by a comma.\nnotes = [\"FIXME\", \"XXX\", \"TODO\"]\n\n# Regular expression of note tags to take in consideration.\n# notes-rgx =\n\n[tool.pylint.refactoring]\n# Maximum number of nested blocks for function / method body\nmax-nested-blocks = 5\n\n# Complete name of functions that never returns. When checking for inconsistent-\n# return-statements if a never returning function is called then it will be\n# considered as an explicit return statement and no message will be printed.\nnever-returning-functions = [\"sys.exit\", \"argparse.parse_error\"]\n\n[tool.pylint.reports]\n# Python expression which should return a score less than or equal to 10. 
You\n# have access to the variables 'fatal', 'error', 'warning', 'refactor',\n# 'convention', and 'info' which contain the number of messages in each category,\n# as well as 'statement' which is the total number of statements analyzed. This\n# score is used by the global evaluation report (RP0004).\nevaluation = \"max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))\"\n\n# Template used to display messages. This is a python new-style format string\n# used to format the message information. See doc for all details.\n# msg-template =\n\n# Set the output format. Available formats are text, parseable, colorized, json\n# and msvs (visual studio). You can also give a reporter class, e.g.\n# mypackage.mymodule.MyReporterClass.\n# output-format =\n\n# Tells whether to display a full report or only the messages.\n# reports =\n\n# Activate the evaluation score.\nscore = true\n\n[tool.pylint.similarities]\n# Comments are removed from the similarity computation\nignore-comments = true\n\n# Docstrings are removed from the similarity computation\nignore-docstrings = true\n\n# Imports are removed from the similarity computation\nignore-imports = true\n\n# Signatures are removed from the similarity computation\nignore-signatures = true\n\n# Minimum lines number of a similarity.\nmin-similarity-lines = 4\n\n[tool.pylint.spelling]\n# Limits count of emitted suggestions for spelling mistakes.\nmax-spelling-suggestions = 4\n\n# Spelling dictionary name. Available dictionaries: none. 
To make it work,\n# install the 'python-enchant' package.\n# spelling-dict =\n\n# List of comma separated words that should be considered directives if they\n# appear at the beginning of a comment and should not be checked.\nspelling-ignore-comment-directives = \"fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:\"\n\n# List of comma separated words that should not be checked.\n# spelling-ignore-words =\n\n# A path to a file that contains the private dictionary; one word per line.\n# spelling-private-dict-file =\n\n# Tells whether to store unknown words to the private dictionary (see the\n# --spelling-private-dict-file option) instead of raising a message.\n# spelling-store-unknown-words =\n\n[tool.pylint.string]\n# This flag controls whether inconsistent-quotes generates a warning when the\n# character used as a quote delimiter is used inconsistently within a module.\n# check-quote-consistency =\n\n# This flag controls whether the implicit-str-concat should generate a warning on\n# implicit string concatenation in sequences defined over several lines.\n# check-str-concat-over-line-jumps =\n\n[tool.pylint.typecheck]\n# List of decorators that produce context managers, such as\n# contextlib.contextmanager. Add to this list to register other decorators that\n# produce valid context managers.\ncontextmanager-decorators = [\"contextlib.contextmanager\"]\n\n# List of members which are set dynamically and missed by pylint inference\n# system, and so shouldn't trigger E1101 when accessed. Python regular\n# expressions are accepted.\n# generated-members =\n\n# Tells whether missing members accessed in mixin class should be ignored. A\n# class is considered mixin if its name matches the mixin-class-rgx option.\n# Tells whether to warn about missing members when the owner of the attribute is\n# inferred to be None.\nignore-none = true\n\n# This flag controls whether pylint should warn about no-member and similar\n# checks whenever an opaque object is returned when inferring. 
The inference can\n# return multiple potential results while evaluating a Python object, but some\n# branches might not be evaluated, which results in partial inference. In that\n# case, it might be useful to still emit no-member and other checks for the rest\n# of the inferred objects.\nignore-on-opaque-inference = true\n\n# List of symbolic message names to ignore for Mixin members.\nignored-checks-for-mixins = [\"no-member\", \"not-async-context-manager\", \"not-context-manager\", \"attribute-defined-outside-init\"]\n\n# List of class names for which member attributes should not be checked (useful\n# for classes with dynamically set attributes). This supports the use of\n# qualified names.\nignored-classes = [\"optparse.Values\", \"thread._local\", \"_thread._local\", \"argparse.Namespace\"]\n\n# Show a hint with possible names when a member name was not found. The aspect of\n# finding the hint is based on edit distance.\nmissing-member-hint = true\n\n# The minimum edit distance a name should have in order to be considered a\n# similar match for a missing member name.\nmissing-member-hint-distance = 1\n\n# The total number of similar names that should be taken in consideration when\n# showing a hint for a missing member.\nmissing-member-max-choices = 1\n\n# Regex pattern to define which classes are considered mixins.\nmixin-class-rgx = \".*[Mm]ixin\"\n\n# List of decorators that change the signature of a decorated function.\n# signature-mutators =\n\n[tool.pylint.variables]\n# List of additional names supposed to be defined in builtins. Remember that you\n# should avoid defining new builtins when possible.\n# additional-builtins =\n\n# Tells whether unused global variables should be treated as a violation.\nallow-global-unused-variables = true\n\n# List of names allowed to shadow builtins\n# allowed-redefined-builtins =\n\n# List of strings which can identify a callback function by name. 
A callback name\n# must start or end with one of those strings.\ncallbacks = [\"cb_\", \"_cb\"]\n\n# A regular expression matching the name of dummy variables (i.e. expected to not\n# be used).\ndummy-variables-rgx = \"_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_\"\n\n# Argument names that match this expression will be ignored.\nignored-argument-names = \"_.*|^ignored_|^unused_\"\n\n# Tells whether we should check for unused import in __init__ files.\n# init-import =\n\n# List of qualified module names which can have objects that can redefine\n# builtins.\nredefining-builtins-modules = [\"six.moves\", \"past.builtins\", \"future.builtins\", \"builtins\", \"io\"]\n\n\n"
  },
  {
    "path": "pytest.ini",
    "content": "[pytest]\nnorecursedirs=dist build .tox .eggs\naddopts=--doctest-modules\ndoctest_optionflags=ALLOW_UNICODE ELLIPSIS\nfilterwarnings=\n\t# Suppress deprecation warning in flake8\n\tignore:SelectableGroups dict interface is deprecated::flake8\n\n\t# shopkeep/pytest-black#55\n\tignore:<class 'pytest_black.BlackItem'> is not using a cooperative constructor:pytest.PytestDeprecationWarning\n\tignore:The \\(fspath. py.path.local\\) argument to BlackItem is deprecated.:pytest.PytestDeprecationWarning\n\tignore:BlackItem is an Item subclass and should not be a collector:pytest.PytestWarning\n\n\t# tholo/pytest-flake8#83\n\tignore:<class 'pytest_flake8.Flake8Item'> is not using a cooperative constructor:pytest.PytestDeprecationWarning\n\tignore:The \\(fspath. py.path.local\\) argument to Flake8Item is deprecated.:pytest.PytestDeprecationWarning\n\tignore:Flake8Item is an Item subclass and should not be a collector:pytest.PytestWarning\n"
  },
  {
    "path": "requirements/base.in",
    "content": "requests\n"
  },
  {
    "path": "requirements/base.txt",
    "content": "# SHA1:54faec366d11efdac0f9d2da560e273f92288c2a\n#\n# This file is autogenerated by pip-compile-multi\n# To update, run:\n#\n#    pip-compile-multi\n#\ncertifi==2025.1.31\n    # via requests\ncharset-normalizer==3.4.1\n    # via requests\nidna==3.10\n    # via requests\nrequests==2.32.3\n    # via -r requirements/base.in\nurllib3==2.3.0\n    # via requests\n"
  },
  {
    "path": "requirements/ci.in",
    "content": "-r base.in\n\npytest\npytest-checkdocs\npytest-cov\n\nflake8\nFlake8-pyproject\n\nmypy\npylint\ntypes-requests\n"
  },
  {
    "path": "requirements/ci.txt",
    "content": "# SHA1:c9e539596bc2fbcffc9073aa78e4a7d1bb185e34\n#\n# This file is autogenerated by pip-compile-multi\n# To update, run:\n#\n#    pip-compile-multi\n#\n-r base.txt\nalabaster==1.0.0\n    # via sphinx\nastroid==3.3.8\n    # via pylint\nbabel==2.17.0\n    # via sphinx\nbuild[virtualenv]==1.2.2.post1\n    # via jaraco-packaging\ncoverage[toml]==7.6.12\n    # via pytest-cov\ndill==0.3.9\n    # via pylint\ndistlib==0.3.9\n    # via virtualenv\ndocutils==0.21.2\n    # via\n    #   pytest-checkdocs\n    #   sphinx\ndomdf-python-tools==3.9.0\n    # via jaraco-packaging\nfilelock==3.17.0\n    # via virtualenv\nflake8==7.1.1\n    # via\n    #   -r requirements/ci.in\n    #   flake8-pyproject\nflake8-pyproject==1.2.3\n    # via -r requirements/ci.in\nimagesize==1.4.1\n    # via sphinx\niniconfig==2.0.0\n    # via pytest\nisort==6.0.0\n    # via pylint\njaraco-context==6.0.1\n    # via jaraco-packaging\njaraco-packaging==10.2.3\n    # via pytest-checkdocs\njinja2==3.1.5\n    # via sphinx\nmarkupsafe==3.0.2\n    # via jinja2\nmccabe==0.7.0\n    # via\n    #   flake8\n    #   pylint\nmypy==1.15.0\n    # via -r requirements/ci.in\nmypy-extensions==1.0.0\n    # via mypy\nnatsort==8.4.0\n    # via domdf-python-tools\npackaging==24.2\n    # via\n    #   build\n    #   pytest\n    #   sphinx\nplatformdirs==4.3.6\n    # via\n    #   pylint\n    #   virtualenv\npluggy==1.5.0\n    # via pytest\npycodestyle==2.12.1\n    # via flake8\npyflakes==3.2.0\n    # via flake8\npygments==2.19.1\n    # via sphinx\npylint==3.3.4\n    # via -r requirements/ci.in\npyproject-hooks==1.2.0\n    # via build\npytest==8.3.4\n    # via\n    #   -r requirements/ci.in\n    #   pytest-cov\npytest-checkdocs==2.13.0\n    # via -r requirements/ci.in\npytest-cov==6.0.0\n    # via -r requirements/ci.in\nsnowballstemmer==2.2.0\n    # via sphinx\nsphinx==8.1.3\n    # via jaraco-packaging\nsphinxcontrib-applehelp==2.0.0\n    # via sphinx\nsphinxcontrib-devhelp==2.0.0\n    # via 
sphinx\nsphinxcontrib-htmlhelp==2.1.0\n    # via sphinx\nsphinxcontrib-jsmath==1.0.1\n    # via sphinx\nsphinxcontrib-qthelp==2.0.0\n    # via sphinx\nsphinxcontrib-serializinghtml==2.0.0\n    # via sphinx\ntomlkit==0.13.2\n    # via pylint\ntypes-requests==2.32.0.20241016\n    # via -r requirements/ci.in\ntyping-extensions==4.12.2\n    # via\n    #   domdf-python-tools\n    #   mypy\nvirtualenv==20.29.2\n    # via build\n"
  },
  {
    "path": "requirements/local.in",
    "content": "-r ci.in\n\n# fmt\nisort\nblack\npre-commit\n\n# packages\npip-compile-multi\n# twine\npex\n\n# debugging\nipython\nipdb\n"
  },
  {
    "path": "requirements/local.txt",
    "content": "# SHA1:344cade73658f99aab1dd0104334b3d1061922ab\n#\n# This file is autogenerated by pip-compile-multi\n# To update, run:\n#\n#    pip-compile-multi\n#\n-r ci.txt\nasttokens==3.0.0\n    # via stack-data\nblack==25.1.0\n    # via -r requirements/local.in\ncfgv==3.4.0\n    # via pre-commit\nclick==8.1.8\n    # via\n    #   black\n    #   pip-compile-multi\n    #   pip-tools\ndecorator==5.1.1\n    # via\n    #   ipdb\n    #   ipython\nexecuting==2.2.0\n    # via stack-data\nidentify==2.6.7\n    # via pre-commit\nipdb==0.13.13\n    # via -r requirements/local.in\nipython==8.32.0\n    # via\n    #   -r requirements/local.in\n    #   ipdb\njedi==0.19.2\n    # via ipython\nmatplotlib-inline==0.1.7\n    # via ipython\nnodeenv==1.9.1\n    # via pre-commit\nparso==0.8.4\n    # via jedi\npathspec==0.12.1\n    # via black\npex==2.33.0\n    # via -r requirements/local.in\npexpect==4.9.0\n    # via ipython\npip-compile-multi==2.7.1\n    # via -r requirements/local.in\npip-tools==7.4.1\n    # via pip-compile-multi\npre-commit==4.1.0\n    # via -r requirements/local.in\nprompt-toolkit==3.0.50\n    # via ipython\nptyprocess==0.7.0\n    # via pexpect\npure-eval==0.2.3\n    # via stack-data\npyyaml==6.0.2\n    # via pre-commit\nstack-data==0.6.3\n    # via ipython\ntoposort==1.10\n    # via pip-compile-multi\ntraitlets==5.14.3\n    # via\n    #   ipython\n    #   matplotlib-inline\nwcwidth==0.2.13\n    # via prompt-toolkit\nwheel==0.45.1\n    # via pip-tools\n\n# The following packages are considered to be unsafe in a requirements file:\npip==25.0.1\n    # via pip-tools\nsetuptools==75.8.0\n    # via pip-tools\n"
  },
  {
    "path": "setup.cfg",
    "content": "[metadata]\nname = openai-cli\nversion = 1.0.1\nauthor = Peter Demin\nauthor_email = peterdemin@gmail.com\ndescription = Command-line client for OpenAI API\nlong_description = file:README.rst\nurl = https://github.com/peterdemin/openai-cli\nclassifiers =\n    Development Status :: 4 - Beta\n\tIntended Audience :: Developers\n\tLicense :: OSI Approved :: MIT License\n\tProgramming Language :: Python :: 3\n\tProgramming Language :: Python :: 3 :: Only\n\n[options]\npackages = find:\ninclude_package_data = true\npython_requires = >=3.12\ninstall_requires =\n    requests\n    click\n\n[options.packages.find]\nwhere=src\n\n[options.entry_points]\nconsole_scripts =\n    openai = openai_cli.cli:cli\n\n[bdist_wheel]\nuniversal = 1\n"
  },
  {
    "path": "src/openai_cli/__init__.py",
    "content": ""
  },
  {
    "path": "src/openai_cli/cli.py",
    "content": "import io\nfrom typing import Optional\n\nimport click\n\nfrom openai_cli.client import OpenAIError, generate_response\nfrom openai_cli.config import DEFAULT_MODEL, MAX_TOKENS, TEMPERATURE, set_openai_api_key\n\n\n@click.group()\n@click.option(\n    \"-m\", \"--model\", default=DEFAULT_MODEL, help=f\"OpenAI model option. (default: {DEFAULT_MODEL})\"\n)\n@click.option(\n    \"-k\",\n    \"--max-tokens\",\n    type=int,\n    default=MAX_TOKENS,\n    help=f\"Maximum number of tokens in the response. (default: {MAX_TOKENS})\",\n)\n@click.option(\n    \"-p\",\n    \"--temperature\",\n    type=float,\n    default=TEMPERATURE,\n    help=f\"Temperature for response generation. (default: {TEMPERATURE})\",\n)\n@click.option(\"-t\", \"--token\", help=\"OpenAI API token\")\n@click.pass_context\ndef cli(ctx, model: str, max_tokens: int, temperature: float, token: Optional[str]):\n    \"\"\"CLI for interacting with OpenAI's completion API.\"\"\"\n    ctx.ensure_object(dict)\n    ctx.obj[\"model\"] = model\n    ctx.obj[\"max_tokens\"] = max_tokens\n    ctx.obj[\"temperature\"] = temperature\n    ctx.obj[\"conversation_history\"] = []\n    if token:\n        set_openai_api_key(token)\n\n\n@cli.command()\n@click.argument(\"source\", type=click.File(\"rt\", encoding=\"utf-8\"))\n@click.pass_context\ndef complete(ctx, source: io.TextIOWrapper) -> None:\n    \"\"\"Return OpenAI completion for a prompt from SOURCE.\"\"\"\n    prompt = source.read()\n    try:\n        result = generate_response(\n            prompt,\n            conversation_history=ctx.obj[\"conversation_history\"],\n            model=ctx.obj[\"model\"],\n            max_tokens=ctx.obj[\"max_tokens\"],\n            temperature=ctx.obj[\"temperature\"],\n        )\n        click.echo(result)\n        ctx.obj[\"conversation_history\"].extend(\n            [{\"role\": \"user\", \"content\": prompt}, {\"role\": \"assistant\", \"content\": result}]\n        )\n    except OpenAIError as e:\n        
click.echo(f\"An error occurred: {str(e)}\", err=True)\n\n\n@cli.command()\n@click.pass_context\ndef repl(ctx) -> None:\n    \"\"\"Start interactive shell session for OpenAI completion API.\"\"\"\n    click.echo(f\"Interactive shell started. Using model: {ctx.obj['model']}\")\n    click.echo(f\"Max tokens: {ctx.obj['max_tokens']}, Temperature: {ctx.obj['temperature']}\")\n    click.echo(\"Type 'exit' or use Ctrl-D to exit.\")\n\n    while True:\n        try:\n            prompt = click.prompt(\"Prompt\", type=str)\n            if prompt.lower() == \"exit\":\n                break\n            result = generate_response(\n                prompt,\n                conversation_history=ctx.obj[\"conversation_history\"],\n                model=ctx.obj[\"model\"],\n                max_tokens=ctx.obj[\"max_tokens\"],\n                temperature=ctx.obj[\"temperature\"],\n            )\n            click.echo(f\"\\nResponse:\\n{result}\\n\")\n            ctx.obj[\"conversation_history\"].extend(\n                [{\"role\": \"user\", \"content\": prompt}, {\"role\": \"assistant\", \"content\": result}]\n            )\n        except click.exceptions.Abort:\n            break\n        except OpenAIError as e:\n            click.echo(f\"An error occurred: {str(e)}\", err=True)\n\n    click.echo(\"Interactive shell ended.\")\n\n\nif __name__ == \"__main__\":\n    cli()\n"
  },
  {
    "path": "src/openai_cli/client.py",
    "content": "import json\nfrom typing import Any, Dict, List, Optional\n\nimport requests\n\nfrom .config import (\n    DEFAULT_MODEL,\n    MAX_TOKENS,\n    SYSTEM_MESSAGE,\n    TEMPERATURE,\n    get_openai_api_key,\n    get_openai_api_url,\n)\n\n\nclass OpenAIError(Exception):\n    pass\n\n\ndef initialize_session() -> requests.Session:\n    \"\"\"\n    Initialize a requests Session with the API key from the environment.\n\n    Returns:\n        requests.Session: Initialized session with API key in headers.\n\n    Raises:\n        OpenAIError: If no API key is found in the environment.\n    \"\"\"\n    api_key = get_openai_api_key()\n    if not api_key:\n        raise OpenAIError(\"The API key must be set in the OPENAI_API_KEY environment variable\")\n\n    session = requests.Session()\n    session.headers.update(\n        {\"Authorization\": f\"Bearer {api_key}\", \"Content-Type\": \"application/json\"}\n    )\n    return session\n\n\ndef generate_response(\n    prompt: str,\n    conversation_history: Optional[List[Dict[str, str]]] = None,\n    model: str = DEFAULT_MODEL,\n    max_tokens: int = MAX_TOKENS,\n    temperature: float = TEMPERATURE,\n    system_message: str = SYSTEM_MESSAGE,\n) -> str:\n    \"\"\"\n    Generates a response from a given prompt using a specified model.\n\n    Args:\n        prompt (str): The prompt to generate a response for.\n        conversation_history (Optional[List[Dict[str, str]]]): Previous conversation messages.\n        model (str): The model to use for generating the response.\n        max_tokens (int): The maximum number of tokens in the response.\n        temperature (float): Controls randomness in the response.\n        system_message (str): The system message to set the context.\n\n    Returns:\n        str: The generated response.\n\n    Raises:\n        OpenAIError: If there's an error with the OpenAI API call.\n    \"\"\"\n    session = initialize_session()\n\n    messages = [{\"role\": \"system\", \"content\": 
system_message}]\n    if conversation_history:\n        messages.extend(conversation_history)\n    messages.append({\"role\": \"user\", \"content\": prompt})\n\n    payload = {\n        \"model\": model,\n        \"messages\": messages,\n        \"max_tokens\": max_tokens,\n        \"temperature\": temperature,\n    }\n\n    try:\n        response = session.post(get_openai_api_url(), data=json.dumps(payload))\n        response.raise_for_status()\n        return _extract_content(response.json())\n    except requests.RequestException as e:\n        raise OpenAIError(f\"Error generating response: {str(e)}\") from e\n\n\ndef _extract_content(response: Dict[str, Any]) -> str:\n    \"\"\"\n    Extracts the content from the API response.\n\n    Args:\n        response (Dict[str, Any]): The API response object.\n\n    Returns:\n        str: The extracted content.\n\n    Raises:\n        ValueError: If the response format is unexpected.\n    \"\"\"\n    try:\n        return response[\"choices\"][0][\"message\"][\"content\"].strip()\n    except (KeyError, IndexError) as e:\n        raise ValueError(f\"Unexpected response format: {str(e)}\") from e\n"
  },
  {
    "path": "src/openai_cli/config.py",
    "content": "import os\n\nDEFAULT_MODEL = \"gpt-4o-mini\"\nMAX_TOKENS = 500\nTEMPERATURE = 0.23\nSYSTEM_MESSAGE = \"You are a helpful assistant.\"\nDEFAULT_API_BASE_URL = \"https://api.openai.com/v1/chat/completions\"\n\n\ndef get_openai_api_key() -> str:\n    \"\"\"\n    Retrieves the OpenAI API key from the environment.\n\n    Returns:\n        str: The OpenAI API key, or an empty string if not set.\n    \"\"\"\n    return os.getenv(\"OPENAI_API_KEY\", \"\")\n\n\ndef set_openai_api_key(api_key: str) -> None:\n    \"\"\"\n    Sets the OpenAI API key in the environment.\n\n    Args:\n        api_key (str): The API key to set.\n    \"\"\"\n    os.environ[\"OPENAI_API_KEY\"] = api_key\n\n\ndef get_openai_api_url() -> str:\n    \"\"\"\n    Retrieves the OpenAI API URL from the environment.\n\n    Returns:\n        str: The OpenAI API URL, or the default URL if not set.\n    \"\"\"\n    return os.getenv(\"OPENAI_API_URL\") or DEFAULT_API_BASE_URL\n\n\ndef set_openai_api_url(api_url: str) -> None:\n    \"\"\"\n    Sets the OpenAI API URL in the environment.\n\n    Args:\n        api_url (str): The API URL to set.\n    \"\"\"\n    os.environ[\"OPENAI_API_URL\"] = api_url\n"
  },
  {
    "path": "src/openai_cli/test_cli.py",
    "content": "import unittest\nfrom unittest.mock import MagicMock, patch\n\nfrom click.testing import CliRunner\n\nfrom openai_cli.cli import cli\nfrom openai_cli.config import DEFAULT_MODEL, MAX_TOKENS, TEMPERATURE\n\n\n@patch(\"openai_cli.client.get_openai_api_url\", return_value=\"http://mock-api-url\")\n@patch(\"openai_cli.client.requests.Session\", autospec=True)\nclass TestCLI(unittest.TestCase):\n    def setUp(self):\n        self.runner = CliRunner()\n\n    @patch(\"openai_cli.cli.generate_response\")\n    def test_complete_command(self, mock_generate, mock_session, mock_url):\n        mock_generate.return_value = \"Mocked response\"\n        result = self.runner.invoke(cli, [\"complete\", \"-\"], input=\"Test prompt\")\n        self.assertEqual(result.exit_code, 0)\n        self.assertIn(\"Mocked response\", result.output)\n        mock_generate.assert_called_once_with(\n            \"Test prompt\",\n            conversation_history=[\n                {\"role\": \"user\", \"content\": \"Test prompt\"},\n                {\"role\": \"assistant\", \"content\": \"Mocked response\"},\n            ],\n            model=DEFAULT_MODEL,\n            max_tokens=MAX_TOKENS,\n            temperature=TEMPERATURE,\n        )\n\n    @patch(\"openai_cli.cli.generate_response\")\n    def test_repl_command(self, mock_generate, mock_session, mock_url):\n        mock_generate.return_value = \"Mocked response\"\n        result = self.runner.invoke(cli, [\"repl\"], input=\"Test prompt\\nexit\\n\")\n        self.assertEqual(result.exit_code, 0)\n        self.assertIn(\"Mocked response\", result.output)\n        self.assertIn(\"Interactive shell ended.\", result.output)\n        mock_generate.assert_called_once_with(\n            \"Test prompt\",\n            conversation_history=[\n                {\"role\": \"user\", \"content\": \"Test prompt\"},\n                {\"role\": \"assistant\", \"content\": \"Mocked response\"},\n            ],\n            model=DEFAULT_MODEL,\n 
           max_tokens=MAX_TOKENS,\n            temperature=TEMPERATURE,\n        )\n\n    @patch(\"openai_cli.cli.generate_response\")\n    def test_model_option(self, mock_generate, mock_session, mock_url):\n        mock_generate.return_value = \"Mocked response\"\n        result = self.runner.invoke(\n            cli, [\"-m\", \"gpt-3.5-turbo\", \"complete\", \"-\"], input=\"Test prompt\"\n        )\n        self.assertEqual(result.exit_code, 0)\n        mock_generate.assert_called_once_with(\n            \"Test prompt\",\n            conversation_history=[\n                {\"role\": \"user\", \"content\": \"Test prompt\"},\n                {\"role\": \"assistant\", \"content\": \"Mocked response\"},\n            ],\n            model=\"gpt-3.5-turbo\",\n            max_tokens=MAX_TOKENS,\n            temperature=TEMPERATURE,\n        )\n\n    @patch(\"openai_cli.cli.generate_response\")\n    def test_max_tokens_option(self, mock_generate, mock_session, mock_url):\n        mock_generate.return_value = \"Mocked response\"\n        result = self.runner.invoke(cli, [\"-k\", \"100\", \"complete\", \"-\"], input=\"Test prompt\")\n        self.assertEqual(result.exit_code, 0)\n        mock_generate.assert_called_once_with(\n            \"Test prompt\",\n            conversation_history=[\n                {\"role\": \"user\", \"content\": \"Test prompt\"},\n                {\"role\": \"assistant\", \"content\": \"Mocked response\"},\n            ],\n            model=DEFAULT_MODEL,\n            max_tokens=100,\n            temperature=TEMPERATURE,\n        )\n\n    @patch(\"openai_cli.cli.generate_response\")\n    def test_temperature_option(self, mock_generate, mock_session, mock_url):\n        mock_generate.return_value = \"Mocked response\"\n        result = self.runner.invoke(cli, [\"-p\", \"0.8\", \"complete\", \"-\"], input=\"Test prompt\")\n        self.assertEqual(result.exit_code, 0)\n        mock_generate.assert_called_once_with(\n            \"Test 
prompt\",\n            conversation_history=[\n                {\"role\": \"user\", \"content\": \"Test prompt\"},\n                {\"role\": \"assistant\", \"content\": \"Mocked response\"},\n            ],\n            model=DEFAULT_MODEL,\n            max_tokens=MAX_TOKENS,\n            temperature=0.8,\n        )\n\n    @patch(\"openai_cli.cli.set_openai_api_key\")\n    @patch(\"openai_cli.cli.generate_response\")\n    def test_token_option(self, mock_generate, mock_set_key, mock_session, mock_url):\n        mock_generate.return_value = \"Mocked response\"\n        result = self.runner.invoke(cli, [\"-t\", \"test_token\", \"complete\", \"-\"], input=\"Test prompt\")\n        self.assertEqual(result.exit_code, 0)\n        mock_set_key.assert_called_once_with(\"test_token\")\n        mock_generate.assert_called_once_with(\n            \"Test prompt\",\n            conversation_history=[\n                {\"role\": \"user\", \"content\": \"Test prompt\"},\n                {\"role\": \"assistant\", \"content\": \"Mocked response\"},\n            ],\n            model=DEFAULT_MODEL,\n            max_tokens=MAX_TOKENS,\n            temperature=TEMPERATURE,\n        )\n        self.assertIn(\"Mocked response\", result.output)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "src/openai_cli/test_client.py",
    "content": "import unittest\nfrom unittest.mock import MagicMock, PropertyMock, patch\n\nimport requests\n\nfrom openai_cli.client import OpenAIError, generate_response, initialize_session\nfrom openai_cli.config import DEFAULT_API_BASE_URL\n\n\nclass TestClient(unittest.TestCase):\n\n    @patch(\"openai_cli.client.requests.Session.post\")\n    @patch(\"openai_cli.client.get_openai_api_url\", return_value=DEFAULT_API_BASE_URL)\n    @patch(\"openai_cli.client.get_openai_api_key\", return_value=\"test_api_key\")\n    @patch(\n        \"openai_cli.client.requests.Session\", autospec=True\n    )  # Mocking the Session itself to prevent any real network interaction\n    def test_generate_response_success(self, mock_session_cls, mock_get_key, mock_get_url, mock_post):\n        mock_response = MagicMock()\n        mock_response.json.return_value = {\"choices\": [{\"message\": {\"content\": \"Mocked response\"}}]}\n        mock_response.status_code = 200\n        mock_post.return_value = mock_response\n\n        mock_session = mock_session_cls.return_value\n        mock_session.post = mock_post\n\n        type(mock_session).headers = PropertyMock(return_value={})\n\n        response = generate_response(\"Test prompt\")\n        self.assertEqual(response, \"Mocked response\")\n        mock_post.assert_called_once_with(\n            DEFAULT_API_BASE_URL,\n            data=unittest.mock.ANY\n        )\n\n    @patch(\"openai_cli.client.requests.Session.post\")\n    @patch(\"openai_cli.client.get_openai_api_url\", return_value=DEFAULT_API_BASE_URL)\n    @patch(\"openai_cli.client.get_openai_api_key\", return_value=\"test_api_key\")\n    @patch(\"openai_cli.client.requests.Session\", autospec=True)\n    def test_generate_response_error(self, mock_session_cls, mock_get_key, mock_get_url, mock_post):\n        mock_post.side_effect = requests.RequestException(\"API Error\")\n\n        mock_session = mock_session_cls.return_value\n        mock_session.post = mock_post\n\n        
type(mock_session).headers = PropertyMock(return_value={})\n\n        with self.assertRaises(OpenAIError):\n            generate_response(\"Test prompt\")\n\n    @patch(\"openai_cli.client.get_openai_api_url\", return_value=\"https://custom.api/v1\")\n    @patch(\"openai_cli.client.requests.Session.post\")\n    @patch(\"openai_cli.client.get_openai_api_key\", return_value=\"test_api_key\")\n    @patch(\"openai_cli.client.requests.Session\", autospec=True)\n    def test_generate_response_custom_url(self, mock_session_cls, mock_get_key, mock_post, mock_get_url):\n        mock_response = MagicMock()\n        mock_response.json.return_value = {\"choices\": [{\"message\": {\"content\": \"Mocked response\"}}]}\n        mock_response.status_code = 200\n        mock_post.return_value = mock_response\n\n        mock_session = mock_session_cls.return_value\n        mock_session.post = mock_post\n\n        type(mock_session).headers = PropertyMock(return_value={})\n\n        response = generate_response(\"Test prompt\")\n        self.assertEqual(response, \"Mocked response\")\n        mock_post.assert_called_once_with(\n            \"https://custom.api/v1\",\n            data=unittest.mock.ANY\n        )\n\n    @patch(\"openai_cli.client.get_openai_api_key\")\n    @patch(\"openai_cli.client.requests.Session\", autospec=True)\n    def test_initialize_session_success(self, mock_session_cls, mock_get_key):\n        mock_get_key.return_value = \"test_api_key\"\n\n        mock_session = mock_session_cls.return_value\n        mock_session.headers = {}\n\n        session = initialize_session()\n        mock_session_cls.assert_called_once()\n        self.assertEqual(session.headers[\"Authorization\"], \"Bearer test_api_key\")\n\n    @patch(\"openai_cli.client.get_openai_api_key\")\n    def test_initialize_session_no_key(self, mock_get_key):\n        mock_get_key.return_value = \"\"\n\n        with self.assertRaises(OpenAIError):\n            initialize_session()\n\n\nif __name__ == 
\"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "src/openai_cli/test_config.py",
    "content": "import unittest\nfrom unittest.mock import patch\n\nfrom openai_cli.config import (\n    DEFAULT_API_BASE_URL,\n    get_openai_api_key,\n    get_openai_api_url,\n    set_openai_api_key,\n    set_openai_api_url,\n)\n\n\nclass TestConfig(unittest.TestCase):\n    @patch(\"os.getenv\")\n    def test_get_openai_api_key_set(self, mock_getenv):\n        mock_getenv.return_value = \"test_api_key\"\n        self.assertEqual(get_openai_api_key(), \"test_api_key\")\n\n    @patch(\"os.getenv\")\n    def test_get_openai_api_key_not_set(self, mock_getenv):\n        mock_getenv.return_value = \"\"\n        self.assertEqual(get_openai_api_key(), \"\")\n\n    @patch(\"os.environ\")\n    def test_set_openai_api_key(self, mock_environ):\n        set_openai_api_key(\"new_api_key\")\n        mock_environ.__setitem__.assert_called_once_with(\"OPENAI_API_KEY\", \"new_api_key\")\n\n    @patch(\"os.getenv\")\n    def test_get_openai_api_url_set(self, mock_getenv):\n        custom_url = \"https://custom.openai.api/v1\"\n        mock_getenv.return_value = custom_url\n        self.assertEqual(get_openai_api_url(), custom_url)\n\n    @patch(\"os.getenv\")\n    def test_get_openai_api_url_not_set(self, mock_getenv):\n        mock_getenv.return_value = None\n        self.assertEqual(get_openai_api_url(), DEFAULT_API_BASE_URL)\n\n    @patch(\"os.environ\")\n    def test_set_openai_api_url(self, mock_environ):\n        custom_url = \"https://custom.openai.api/v1\"\n        set_openai_api_url(custom_url)\n        mock_environ.__setitem__.assert_called_once_with(\"OPENAI_API_URL\", custom_url)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "tox.ini",
    "content": "[tox]\nenvlist = python\nminversion = 3.12\ntox_pip_extensions_ext_venv_update = true\ntoxworkdir={env:TOX_WORK_DIR:.tox}\n\n[testenv]\ndeps =\ncommands =\n\tpytest {posargs}\nusedevelop = True\nextras = testing\nallowlist_externals =\n    pytest\nsetenv=\n    OPENAI_API_KEY='<openai-api-key>'\n\n[testenv:docs]\nextras =\n\tdocs\n\ttesting\nchangedir = docs\ncommands =\n\tpython -m sphinx -W --keep-going . {toxinidir}/build/html\n\n[testenv:release]\nskip_install = True\ndeps =\n\tbuild\n\ttwine>=3\npassenv =\n\tTWINE_PASSWORD\n\tGITHUB_TOKEN\nsetenv =\n\tTWINE_USERNAME = {env:TWINE_USERNAME:__token__}\ncommands =\n\tpython -c \"import shutil; shutil.rmtree('dist', ignore_errors=True)\"\n\tpython -m build\n\tpython -m twine upload dist/*\n"
  }
]