[
  {
    "path": ".devenv",
    "content": "LLM_API_KEY=\nLLM_PROVIDER=\nLLM_MODEL_NAME=\nADOPT_CLIENT_ID=\nADOPT_SECRET_KEY=\nYOUR_API_URL="
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug-report.yml",
    "content": "name: \"🐞 Bug Report\"\ndescription: \"Report a bug or unexpected behavior in ZAPI\"\ntitle: \"[Bug]: <Short description>\"\nlabels: [\"bug\", \"needs-triage\"]\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        ## 🐞 Bug Report\n        Thanks for taking the time to report a bug! Please provide as much detail as possible to help us investigate and fix it quickly.\n\n  - type: input\n    id: zapi_version\n    attributes:\n      label: \"ZAPI Version\"\n      description: \"Version of ZAPI you're using (check with `pip show zapi`)\"\n      placeholder: \"0.1.0\"\n    validations:\n      required: true\n\n  - type: input\n    id: python_version\n    attributes:\n      label: \"Python Version\"\n      description: \"Python version and operating system\"\n      placeholder: \"Python 3.11 on macOS 14.2 or Python 3.9 on Ubuntu 22.04\"\n    validations:\n      required: true\n\n  - type: dropdown\n    id: component\n    attributes:\n      label: \"Component\"\n      description: \"Which part of ZAPI is affected?\"\n      options:\n        - Browser Session / Playwright\n        - HAR Processing / Analysis\n        - LLM Key Management / BYOK\n        - LangChain Integration\n        - Authentication / OAuth\n        - File Upload\n        - Other\n      default: 0\n    validations:\n      required: true\n\n  - type: dropdown\n    id: environment\n    attributes:\n      label: \"Environment\"\n      description: \"Where did this issue occur?\"\n      options:\n        - Local Development\n        - CI/CD Pipeline\n        - Docker Container\n        - Cloud Deployment\n        - Other\n      default: 0\n    validations:\n      required: true\n\n  - type: textarea\n    id: description\n    attributes:\n      label: \"Describe the Bug\"\n      description: \"What happened? What did you expect to happen instead?\"\n      placeholder: |\n        When calling `z.launch_browser(url=\"https://example.com\")`, the browser crashes immediately.\n        Expected: Browser should launch and navigate to the URL successfully.\n    validations:\n      required: true\n\n  - type: textarea\n    id: reproduction_steps\n    attributes:\n      label: \"Steps to Reproduce\"\n      description: \"Please include exact steps or code to reproduce the issue\"\n      placeholder: |\n        1. Initialize ZAPI with valid credentials\n        2. Call `z.launch_browser(url=\"https://example.com\")`\n        3. Browser crashes with error\n    validations:\n      required: true\n\n  - type: textarea\n    id: code_snippet\n    attributes:\n      label: \"Minimal Reproducible Example\"\n      description: \"Paste code to reproduce (remove sensitive data like API keys)\"\n      placeholder: |\n        ```python\n        from zapi import ZAPI\n        \n        z = ZAPI()\n        session = z.launch_browser(url=\"https://example.com\")\n        session.dump_logs(\"session.har\")\n        session.close()\n        ```\n      render: python\n\n  - type: textarea\n    id: error_logs\n    attributes:\n      label: \"Error Output / Stack Trace\"\n      description: \"Paste the full error output or traceback\"\n      render: shell\n      placeholder: |\n        Traceback (most recent call last):\n          File \"demo.py\", line 10, in <module>\n            session = z.launch_browser(url=\"https://example.com\")\n          File \"zapi/core.py\", line 367, in launch_browser\n            raise ZAPIError(f\"Failed to launch browser session: {error_message}\")\n        zapi.core.ZAPIError: Failed to launch browser session: ...\n    validations:\n      required: true\n\n  - type: textarea\n    id: evidence\n    attributes:\n      label: \"Evidence / Demo\"\n      description: \"Provide screenshots, video recording, or terminal output showing the issue\"\n      placeholder: |\n        - Screenshot: [Attach image]\n        - Video: [Link to Loom/YouTube]\n        - Terminal output: [Paste relevant logs]\n        - HAR file snippet: [If applicable]\n\n  - type: checkboxes\n    id: reproducibility\n    attributes:\n      label: \"Reproducibility\"\n      description: \"How consistently does the bug occur?\"\n      options:\n        - label: \"Always reproducible\"\n        - label: \"Intermittent / Sometimes\"\n        - label: \"Happened once, can't reproduce\"\n\n  - type: textarea\n    id: environment_details\n    attributes:\n      label: \"Environment Details\"\n      description: \"Additional environment information (optional)\"\n      placeholder: |\n        - Playwright version: 1.40.0\n        - Browser: Chromium 120.0.6099.109\n        - LLM Provider: anthropic\n        - Headless mode: True/False\n\n  - type: textarea\n    id: additional_context\n    attributes:\n      label: \"Additional Context or Screenshots\"\n      description: \"Add logs, screenshots, HAR files, or related issues if available\"\n\n  - type: checkboxes\n    id: checklist\n    attributes:\n      label: \"Pre-submission Checklist\"\n      options:\n        - label: \"I have searched existing issues to avoid duplicates\"\n          required: true\n        - label: \"I have removed sensitive data (API keys, tokens) from code snippets\"\n          required: true\n        - label: \"I have tested with the latest version of ZAPI\"\n          required: false\n\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/config.yml",
    "content": "blank_issues_enabled: false\ncontact_links:\n  - name: 📚 Documentation\n    url: https://github.com/adoptai/zapi/blob/main/README.md\n    about: Read the full documentation and usage guides\n  - name: 💬 GitHub Discussions\n    url: https://github.com/adoptai/zapi/discussions\n    about: Ask questions and discuss ideas with the community\n  - name: 🌐 Adopt AI Website\n    url: https://www.adopt.ai\n    about: Visit the Adopt AI website for more information\n  - name: 🐦 Follow us on X (Twitter)\n    url: https://twitter.com/getadoptai\n    about: Stay updated with the latest news and announcements\n\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature-request.yml",
    "content": "name: \"🚀 Feature Request\"\ndescription: \"Suggest a new feature or improvement for ZAPI\"\ntitle: \"[Feature]: <Short description>\"\nlabels: [\"feature-request\", \"enhancement\"]\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        ## 🚀 Feature Request\n        Have an idea that can make ZAPI better? Please describe it below as clearly as possible.  \n        The more context you give, the easier it is for us to prioritize and implement!\n\n  - type: dropdown\n    id: area\n    attributes:\n      label: \"Area of Improvement\"\n      description: \"Which part of ZAPI does this request relate to?\"\n      options:\n        - Browser Session / Playwright Integration\n        - HAR Processing / Analysis\n        - LLM Provider Support\n        - LangChain Integration\n        - Authentication / Security\n        - API Discovery Features\n        - Documentation\n        - Developer Experience\n        - Other\n      default: 0\n    validations:\n      required: true\n\n  - type: input\n    id: feature_title\n    attributes:\n      label: \"Feature Name\"\n      description: \"Short descriptive name for the feature\"\n      placeholder: \"Add support for Gemini LLM provider\"\n    validations:\n      required: true\n\n  - type: textarea\n    id: feature_description\n    attributes:\n      label: \"Describe the Feature\"\n      description: \"What would you like to see added or improved?\"\n      placeholder: |\n        I'd like ZAPI to support Google's Gemini API as an LLM provider for API discovery, \n        similar to how it currently supports Anthropic, OpenAI, Google, and Groq.\n    validations:\n      required: true\n\n  - type: textarea\n    id: use_case\n    attributes:\n      label: \"Use Case / Motivation\"\n      description: \"Explain why this feature is valuable. What problem does it solve?\"\n      placeholder: |\n        - My team uses Gemini for all LLM tasks and wants consistency\n        - Gemini offers better pricing for our use case\n        - We need multi-modal capabilities for API documentation\n    validations:\n      required: true\n\n  - type: textarea\n    id: proposed_solution\n    attributes:\n      label: \"Proposed Solution or API Design (Optional)\"\n      description: \"How would you like this to work? Feel free to propose code examples.\"\n      placeholder: |\n        Example usage:\n        ```python\n        from zapi import ZAPI\n        \n        z = ZAPI(\n            llm_provider=\"gemini\",\n            llm_model_name=\"gemini-1.5-pro\",\n            llm_api_key=\"your-gemini-key\"\n        )\n        \n        session = z.launch_browser(url=\"https://example.com\")\n        # ... rest of workflow\n        ```\n\n  - type: dropdown\n    id: priority\n    attributes:\n      label: \"Priority (from your perspective)\"\n      description: \"How important is this feature to you?\"\n      options:\n        - Critical - Blocking my workflow\n        - High - Would significantly improve my experience\n        - Medium - Nice to have\n        - Low - Just an idea\n      default: 2\n\n  - type: checkboxes\n    id: impact_scope\n    attributes:\n      label: \"Who does this impact?\"\n      options:\n        - label: \"Python developers using ZAPI\"\n        - label: \"LangChain users\"\n        - label: \"API discovery workflows\"\n        - label: \"HAR processing pipelines\"\n        - label: \"Security / BYOK users\"\n\n  - type: textarea\n    id: alternatives\n    attributes:\n      label: \"Alternatives Considered\"\n      description: \"Have you considered any workarounds or alternative approaches?\"\n      placeholder: |\n        - Currently using OpenAI but prefer Gemini\n        - Manual HAR processing with custom scripts\n\n  - type: textarea\n    id: related_issues\n    attributes:\n      label: \"Related Issues / References\"\n      description: \"Link any related GitHub issues, docs, or external resources\"\n      placeholder: \"#42, https://ai.google.dev/gemini-api/docs\"\n\n  - type: checkboxes\n    id: willingness\n    attributes:\n      label: \"Would you like to contribute to this feature?\"\n      options:\n        - label: \"Yes, I can help implement it\"\n        - label: \"Maybe, I can help test or review\"\n        - label: \"No, just sharing the idea\"\n\n  - type: textarea\n    id: additional_context\n    attributes:\n      label: \"Additional Context\"\n      description: \"Any extra information, mockups, code samples, or screenshots\"\n\n  - type: checkboxes\n    id: checklist\n    attributes:\n      label: \"Pre-submission Checklist\"\n      options:\n        - label: \"I have searched existing issues to avoid duplicates\"\n          required: true\n        - label: \"I have checked the documentation to ensure this isn't already supported\"\n          required: true\n\n"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "## Description\n\n<!-- Provide a clear and concise description of what this PR does -->\n\n## Type of Change\n\n<!-- Check all that apply -->\n\n- [ ] Bug fix (non-breaking change that fixes an issue)\n- [ ] New feature (non-breaking change that adds functionality)\n- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)\n- [ ] Documentation update\n- [ ] Code refactoring\n- [ ] Performance improvement\n- [ ] Test coverage improvement\n\n## Related Issues\n\n<!-- Link related issues using #issue_number -->\n\nFixes #\nRelates to #\n\n## Changes Made\n\n<!-- List the main changes in bullet points -->\n\n- \n- \n- \n\n## Testing\n\n<!-- Describe how you tested your changes -->\n\n- [ ] Tested with `demo.py`\n- [ ] Tested with example scripts\n- [ ] Tested error cases\n- [ ] Tested with different Python versions\n- [ ] Tested browser interactions (if applicable)\n- [ ] Tested HAR processing (if applicable)\n- [ ] Tested LangChain integration (if applicable)\n\n### Test Environment\n\n- Python version: \n- Operating System: \n- ZAPI version: \n\n## Evidence / Demo\n\n<!-- Provide evidence that your changes work as expected -->\n\n### Code Snippet / Reproduction\n\n```python\n# Paste code demonstrating the fix or feature\n\n```\n\n### Output / Screenshots\n\n<!-- Add screenshots, terminal output, or video demos if applicable -->\n\n```\n# Paste relevant output here\n\n```\n\n## Documentation\n\n- [ ] Updated README.md (if needed)\n- [ ] Updated docstrings\n- [ ] Updated CONTRIBUTING.md (if needed)\n- [ ] Added/updated code examples\n\n## Checklist\n\n- [ ] My code follows the project's coding standards\n- [ ] I have performed a self-review of my code\n- [ ] I have commented my code, particularly in hard-to-understand areas\n- [ ] My changes generate no new warnings or errors\n- [ ] I have removed any sensitive data (API keys, tokens) from the code\n- [ ] I have tested that existing functionality still works\n- [ ] I have read the [CONTRIBUTING.md](../CONTRIBUTING.md) guide\n\n## Additional Context\n\n<!-- Add any other context about the PR here -->\n\n"
  },
  {
    "path": ".github/workflows/ruff-check.yml",
    "content": "name: Ruff Linting\n\non:\n  pull_request:\n    branches:\n      - main\n      - dev\n    paths:\n      - '**.py'\n      - 'pyproject.toml'\n      - 'requirements.txt'\n      - '.github/workflows/ruff-check.yml'\n  push:\n    branches:\n      - main\n      - dev\n    paths:\n      - '**.py'\n      - 'pyproject.toml'\n      - 'requirements.txt'\n      - '.github/workflows/ruff-check.yml'\n\njobs:\n  ruff-check:\n    runs-on: ubuntu-latest\n    \n    steps:\n      - name: Checkout code\n        uses: actions/checkout@v4\n\n      - name: Set up Python\n        uses: actions/setup-python@v5\n        with:\n          python-version: '3.11'\n\n      - name: Install Ruff\n        run: |\n          pip install ruff\n\n      - name: Run Ruff Linter\n        run: |\n          ruff check . --output-format=github\n\n      - name: Run Ruff Formatter Check\n        run: |\n          ruff format --check .\n\n"
  },
  {
    "path": ".gitignore",
    "content": "# Python\n__pycache__/\n*.py[cod]\n*$py.class\n*.so\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\npip-wheel-metadata/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# Virtual environments\n.venv/\nvenv/\nenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Environment variables\n.env\n\n# API credentials\napi-headers.json\n\n# IDEs\n.vscode/\n.idea/\n*.swp\n*.swo\n*~\n.DS_Store\n\n# Testing\n.pytest_cache/\n.coverage\nhtmlcov/\n.tox/\n.hypothesis/\n\n# HAR files\n*.har\n\n# Poetry lock file\npoetry.lock\n\n# Playwright\nplaywright-report/\ntest-results/\n\n# Temporary files\n*.log\n*.tmp\n.temp/\n\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "# Pre-commit hooks for ZAPI\n# See https://pre-commit.com for more information\n\nrepos:\n  # Ruff - Fast Python linter and formatter\n  - repo: https://github.com/astral-sh/ruff-pre-commit\n    rev: v0.6.9\n    hooks:\n      # Run the linter\n      - id: ruff\n        args: [--fix]\n        types_or: [python, pyi]\n      # Run the formatter\n      - id: ruff-format\n        types_or: [python, pyi]\n\n  # Additional useful hooks\n  - repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v4.6.0\n    hooks:\n      # Prevent committing large files\n      - id: check-added-large-files\n        args: ['--maxkb=1000']\n      # Check for files that would conflict in case-insensitive filesystems\n      - id: check-case-conflict\n      # Check for merge conflicts\n      - id: check-merge-conflict\n      # Check YAML files\n      - id: check-yaml\n        exclude: ^\\.github/workflows/\n      # Check TOML files\n      - id: check-toml\n      # Check JSON files\n      - id: check-json\n      # Trim trailing whitespace\n      - id: trailing-whitespace\n        exclude: ^\\.github/\n      # Ensure files end with newline\n      - id: end-of-file-fixer\n        exclude: ^\\.github/\n      # Prevent committing to main/master\n      - id: no-commit-to-branch\n        args: ['--branch', 'main', '--branch', 'master']\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing to ZAPI\n\nThank you for your interest in contributing to ZAPI! This document provides guidelines and instructions for contributing to the project.\n\n## Table of Contents\n\n- [Development Setup](#development-setup)\n- [Project Structure](#project-structure)\n- [Coding Standards](#coding-standards)\n- [Documentation Requirements](#documentation-requirements)\n- [Pull Request Process](#pull-request-process)\n- [Adding New LLM Providers](#adding-new-llm-providers)\n- [Testing Guidelines](#testing-guidelines)\n- [Release Process](#release-process)\n\n## Development Setup\n\n### Prerequisites\n\n- Python 3.9 or later\n- pip (Python package manager)\n- Git\n- [Playwright](https://playwright.dev/python/) browser binaries\n\n### Getting Started\n\n1. Fork and clone the repository:\n\n   ```bash\n   git clone https://github.com/YOUR_USERNAME/zapi.git\n   cd zapi\n   ```\n\n2. Create a virtual environment (recommended):\n\n   ```bash\n   python -m venv venv\n   source venv/bin/activate  # On Windows: venv\\Scripts\\activate\n   ```\n\n3. Install dependencies:\n\n   ```bash\n   pip install -r requirements.txt\n   ```\n\n4. Install Playwright browser binaries:\n\n   ```bash\n   playwright install\n   ```\n\n5. Set up your environment variables:\n\n   ```bash\n   cp .devenv .env\n   # Edit .env with your credentials from app.adopt.ai\n   ```\n\n6. Install Ruff for linting and formatting:\n\n   ```bash\n   pip install ruff\n   ```\n\n7. Install pre-commit hooks (recommended):\n\n   ```bash\n   pip install pre-commit\n   pre-commit install\n   ```\n\n   This will automatically run Ruff checks before every commit.\n\n8. Test the installation:\n\n   ```bash\n   python demo.py\n   ```\n\n### Development Commands\n\n```bash\n# Run the demo script\npython demo.py\n\n# Run specific examples\npython examples/basic_usage.py\npython examples/langchain/demo.py\n\n# Test HAR processing\npython -c \"from zapi import analyze_har_file; analyze_har_file('demo_session.har')\"\n```\n\n### Code Quality Tools\n\nZAPI uses [Ruff](https://docs.astral.sh/ruff/) for fast linting and formatting. All PRs are automatically checked via GitHub Actions.\n\n**Run linting checks:**\n\n```bash\n# Check for linting issues\nruff check .\n\n# Auto-fix linting issues\nruff check . --fix\n```\n\n**Run formatting checks:**\n\n```bash\n# Check if code is formatted correctly\nruff format --check .\n\n# Format code automatically\nruff format .\n```\n\n**Before submitting a PR:**\n\n```bash\n# Option 1: Run pre-commit hooks manually\npre-commit run --all-files\n\n# Option 2: Run Ruff directly\nruff check .\nruff format --check .\n\n# Option 3: Use the pre-commit script\n./scripts/pre-commit.sh\n\n# Or fix everything automatically\nruff check . --fix\nruff format .\n```\n\n**Configuration:**\n\nRuff settings are defined in `pyproject.toml`. Key settings:\n- Line length: 120 characters\n- Target: Python 3.9+\n- Enabled rules: pycodestyle, pyflakes, isort, pep8-naming, pyupgrade, flake8-bugbear, and more\n\n### Pre-commit Hooks\n\nZAPI uses [pre-commit](https://pre-commit.com/) to automatically run checks before commits:\n\n**Setup (one-time):**\n```bash\npip install pre-commit\npre-commit install\n```\n\n**What it does:**\n- ✅ Runs Ruff linter with auto-fix\n- ✅ Runs Ruff formatter\n- ✅ Checks for large files (>1MB)\n- ✅ Checks YAML, TOML, JSON syntax\n- ✅ Trims trailing whitespace\n- ✅ Prevents commits to main/master\n\n**Manual run:**\n```bash\n# Run on all files\npre-commit run --all-files\n\n# Run on staged files only\npre-commit run\n\n# Use the standalone script\n./scripts/pre-commit.sh\n```\n\n**Skip hooks (not recommended):**\n```bash\ngit commit --no-verify\n```\n\n## Project Structure\n\n```\nzapi/\n├── zapi/                      # Main package directory\n│   ├── __init__.py           # Package exports\n│   ├── core.py               # ZAPI class, OAuth, BYOK encryption\n│   ├── session.py            # BrowserSession with Playwright\n│   ├── auth.py               # Authentication handlers\n│   ├── providers.py          # LLM provider validation\n│   ├── encryption.py         # AES-256-GCM key encryption\n│   ├── har_processing.py     # HAR analysis and filtering\n│   ├── utils.py              # Helper utilities\n│   ├── constants.py          # Configuration constants\n│   ├── exceptions.py         # Custom exception classes\n│   └── integrations/\n│       └── langchain/\n│           └── tool.py       # LangChain tool integration\n├── examples/                  # Example scripts\n│   ├── basic_usage.py\n│   ├── async_usage.py\n│   └── langchain/\n│       ├── demo.py           # Interactive LangChain demo\n│       └── README.md         # LangChain integration guide\n├── docs/                      # Documentation\n├── demo.py                    # End-to-end demo script\n├── requirements.txt           # Python dependencies\n├── pyproject.toml            # Package metadata\n├── setup.py                  # Setup script\n├── README.md                 # Main documentation\n└── CONTRIBUTING.md           # This file\n```\n\n### Key Modules\n\n| Module | Purpose |\n|--------|---------|\n| `zapi/core.py` | Main `ZAPI` class with credential loading, OAuth token exchange, BYOK encryption, HAR upload, and API documentation fetching |\n| `zapi/session.py` | `BrowserSession` wrapper around Playwright with auth injection, HAR recording, navigation helpers, and error handling |\n| `zapi/providers.py` | LLM provider validation for Anthropic, OpenAI, Google, and Groq with format-specific checks |\n| `zapi/encryption.py` | `LLMKeyEncryption` class using AES-256-GCM for secure key storage |\n| `zapi/har_processing.py` | `HarProcessor` for filtering static assets, analyzing API calls, and cost estimation |\n| `zapi/integrations/langchain/tool.py` | `ZAPILangchainTool` for converting documented APIs into LangChain tools |\n\n## Coding Standards\n\n### Python Style Guide\n\n1. Follow [PEP 8](https://pep8.org/) style guidelines\n2. Use type hints for all function parameters and return values\n3. Use docstrings for all public classes, methods, and functions\n4. Keep functions focused and under 50 lines when possible\n5. Use meaningful variable and function names\n6. Prefer explicit over implicit\n7. Use `pathlib.Path` for file operations\n8. Use f-strings for string formatting\n\n### File Headers\n\nEvery Python module should include a docstring at the top:\n\n```python\n\"\"\"Module description.\n\nDetailed explanation of what this module does and how it fits\ninto the larger ZAPI architecture.\n\"\"\"\n```\n\n### Function Documentation\n\nEvery public function must include a docstring with:\n\n1. Brief description\n2. Args section with type hints\n3. Returns section\n4. Raises section for exceptions\n5. Example usage (for user-facing functions)\n\nExample:\n\n```python\ndef analyze_har_file(\n    har_file_path: str,\n    save_filtered: bool = False,\n    filtered_output_path: Optional[str] = None\n) -> Tuple[HarStats, str, Optional[str]]:\n    \"\"\"\n    Analyze a HAR file and generate statistics.\n\n    This function loads a HAR file, filters out static assets,\n    and provides cost/time estimates for API discovery processing.\n\n    Args:\n        har_file_path: Path to the HAR file to analyze\n        save_filtered: Whether to save filtered HAR with only API entries\n        filtered_output_path: Custom path for filtered HAR (optional)\n\n    Returns:\n        Tuple of (statistics, formatted_report, filtered_file_path)\n\n    Raises:\n        HarProcessingError: If HAR file is invalid or cannot be processed\n        FileNotFoundError: If HAR file does not exist\n\n    Example:\n        >>> stats, report, filtered = analyze_har_file(\"session.har\", save_filtered=True)\n        >>> print(f\"API entries: {stats.valid_entries}\")\n        >>> print(f\"Estimated cost: ${stats.estimated_cost_usd:.2f}\")\n    \"\"\"\n    # Implementation\n```\n\n### Error Handling\n\n1. Use custom exception classes from `zapi/exceptions.py`\n2. Provide meaningful error messages\n3. Include context in error messages (e.g., file paths, URLs)\n4. Document all exceptions in function docstrings\n5. Use try-except blocks appropriately\n6. Log errors when appropriate\n\nExample:\n\n```python\nfrom .core import ZAPIValidationError, ZAPINetworkError\n\ndef upload_har(self, har_file: str):\n    \"\"\"Upload HAR file to ZAPI service.\"\"\"\n    try:\n        with open(har_file, 'rb') as f:\n            # Upload logic\n            pass\n    except FileNotFoundError:\n        raise ZAPIValidationError(f\"HAR file not found: '{har_file}'\")\n    except requests.exceptions.ConnectionError:\n        raise ZAPINetworkError(\n            \"Cannot connect to ZAPI service. \"\n            \"Please check your internet connection.\"\n        )\n```\n\n### Code Organization\n\n1. Group imports in this order:\n   - Standard library imports\n   - Third-party imports\n   - Local application imports\n2. Use blank lines to separate logical sections\n3. Keep related functionality together\n4. Extract complex logic into helper functions\n5. Use constants for magic numbers and strings\n\n## Documentation Requirements\n\n### Module Documentation\n\nEach module should have:\n\n1. Clear docstring explaining its purpose\n2. Usage examples for public APIs\n3. Type hints for all functions\n4. Inline comments for complex logic\n\n### README Updates\n\nWhen adding new features:\n\n1. Update the main README.md with usage examples\n2. Add to the appropriate section (Quick Start, API Reference, etc.)\n3. Include code examples that users can copy-paste\n4. Update the Table of Contents if adding new sections\n\n### Example Scripts\n\nWhen creating example scripts:\n\n1. Add them to the `examples/` directory\n2. Include a header comment explaining what the example demonstrates\n3. Make examples self-contained and runnable\n4. Use clear variable names and comments\n5. Handle errors gracefully with informative messages\n\n## Pull Request Process\n\n1. Create a feature branch from `dev`:\n\n   ```bash\n   git checkout dev\n   git pull origin dev\n   git checkout -b feature/your-feature-name\n   ```\n\n2. Make your changes following the coding standards\n\n3. Test your changes thoroughly:\n   - Run existing examples to ensure no regressions\n   - Test error cases\n   - Test with different Python versions if possible\n\n4. Update documentation:\n   - Add/update docstrings\n   - Update README.md if needed\n   - Add example usage if applicable\n\n5. Commit your changes with clear messages:\n\n   ```bash\n   git add .\n   git commit -m \"Add feature: brief description\"\n   ```\n\n6. Push to your fork and create a pull request:\n\n   ```bash\n   git push origin feature/your-feature-name\n   ```\n\n7. In your pull request description:\n   - Explain what the change does\n   - Link to any related issues\n   - Include screenshots/examples if applicable\n   - List any breaking changes\n\n8. Wait for review and address feedback\n\n### Pull Request Guidelines\n\n- Keep PRs focused on a single feature or fix\n- Write clear commit messages\n- Include tests if applicable\n- Update documentation\n- **Ensure code passes Ruff checks** (`ruff check .` and `ruff format --check .`)\n- Respond to review comments promptly\n\n**Note:** All PRs are automatically checked by GitHub Actions for code quality using Ruff. Make sure to run the checks locally before submitting to avoid CI failures.\n\n## Adding New LLM Providers\n\nTo add support for a new LLM provider:\n\n1. Update `zapi/providers.py`:\n\n   ```python\n   class LLMProvider(Enum):\n       # ... existing providers ...\n       NEW_PROVIDER = \"newprovider\"\n   ```\n\n2. Add validation logic in `_validate_key_format()`:\n\n   ```python\n   elif provider == LLMProvider.NEW_PROVIDER.value:\n       if not api_key.startswith(\"expected-prefix-\"):\n           raise LLMKeyException(\"NewProvider API keys must start with 'expected-prefix-'\")\n       if len(api_key) < 20:\n           raise LLMKeyException(\"NewProvider API keys must be at least 20 characters long\")\n   ```\n\n3. Update `get_supported_providers_info()`:\n\n   ```python\n   \"newprovider\": {\n       \"display_name\": \"NewProvider\",\n       \"support_level\": \"main\",\n       \"description\": \"Fully supported with complete validation\"\n   }\n   ```\n\n4. Update documentation:\n   - Add provider to README.md supported providers list\n   - Add example usage in Environment Setup section\n   - Update `zapi/utils.py` if needed for environment variable mapping\n\n5. Test the new provider:\n   - Test key validation\n   - Test encryption/decryption\n   - Test with actual API calls if possible\n\n## Testing Guidelines\n\n### Manual Testing\n\n1. Test with the demo script:\n   ```bash\n   python demo.py\n   ```\n\n2. Test specific features:\n   ```bash\n   # Test HAR analysis\n   python -c \"from zapi import analyze_har_file; print(analyze_har_file('demo_session.har'))\"\n\n   # Test LangChain integration\n   python examples/langchain/demo.py\n   ```\n\n3. Test error cases:\n   - Invalid credentials\n   - Invalid URLs\n   - Missing files\n   - Network errors\n\n### Testing Checklist\n\nBefore submitting a PR, verify:\n\n- [ ] Code runs without errors\n- [ ] All examples still work\n- [ ] Error messages are clear and helpful\n- [ ] Documentation is updated\n- [ ] No sensitive data in code or commits\n- [ ] **Code passes Ruff linting** (`ruff check .`)\n- [ ] **Code is properly formatted** (`ruff format --check .`)\n- [ ] New features have usage examples\n\n## Release Process\n\nZAPI follows semantic versioning (MAJOR.MINOR.PATCH):\n\n- **MAJOR**: Breaking changes\n- **MINOR**: New features (backward compatible)\n- **PATCH**: Bug fixes (backward compatible)\n\n### Creating a Release\n\n1. Update version in `pyproject.toml` and `setup.py`\n2. Update `__version__` in `zapi/__init__.py`\n3. Update CHANGELOG.md (if exists) with changes\n4. Create a release commit:\n   ```bash\n   git commit -am \"Release v0.2.0\"\n   ```\n5. Create a tag:\n   ```bash\n   git tag v0.2.0\n   git push origin v0.2.0\n   ```\n6. Create a GitHub release with release notes\n7. Publish to PyPI (maintainers only):\n   ```bash\n   python -m build\n   python -m twine upload dist/*\n   ```\n\n## Questions and Support\n\n- **Issues**: [GitHub Issues](https://github.com/adoptai/zapi/issues)\n- **Discussions**: [GitHub Discussions](https://github.com/adoptai/zapi/discussions)\n- **Website**: [adopt.ai](https://www.adopt.ai)\n- **Twitter**: [@getadoptai](https://twitter.com/getadoptai)\n- **LinkedIn**: [Adopt AI](https://www.linkedin.com/company/getadoptai)\n\n## Code of Conduct\n\n- Be respectful and inclusive\n- Provide constructive feedback\n- Focus on what is best for the community\n- Show empathy towards other contributors\n\n## License\n\nBy contributing to ZAPI, you agree that your contributions will be licensed under the MIT License.\n\nCopyright (c) 2025 AdoptAI\n\nSee [LICENSE](LICENSE) file for full license text.\n\n---\n\nThank you for contributing to ZAPI! Your contributions help make API discovery and LLM integration easier for everyone. 🚀\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2025 AdoptAI\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "MANIFEST.in",
    "content": "# Include important files in the distribution\ninclude README.md\ninclude LICENSE\ninclude requirements.txt\ninclude CONTRIBUTING.md\n\n# Include all Python files in the package\nrecursive-include zapi *.py\n\n# Include examples\nrecursive-include examples *.py\n\n# Exclude development and build artifacts\nglobal-exclude __pycache__\nglobal-exclude *.py[cod]\nglobal-exclude *.so\nglobal-exclude .DS_Store\nglobal-exclude *.har\n"
  },
  {
    "path": "README.md",
    "content": "<h3 align=\"center\">\n  <a name=\"readme-top\"></a>\n  <img\n    src=\"https://asset.adopt.ai/web/icons/github_banner.png\">\n</h3>\n<div align=\"center\">\n<a href=\"https://GitHub.com/adoptai/zapi/graphs/contributors\">\n  <img src=\"https://img.shields.io/github/contributors/adoptai/zapi.svg\" alt=\"GitHub Contributors\">\n</a>\n<a href=\"https://www.adopt.ai\">\n  <img src=\"https://img.shields.io/badge/Visit-Adopt.AI-gr\" alt=\"Visit Adopt AI\">\n</a>\n</div>\n<div>\n  <p align=\"center\">\n    <a href=\"https://twitter.com/getadoptai\">\n      <img src=\"https://img.shields.io/badge/Follow%20on%20X-000000?style=for-the-badge&logo=x&logoColor=white\" alt=\"Follow on X\" />\n    </a>\n    <a href=\"https://www.linkedin.com/company/getadoptai\">\n      <img src=\"https://img.shields.io/badge/Follow%20on%20LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white\" alt=\"Follow on LinkedIn\" />\n    </a>\n  </p>\n</div>\n\n# ZAPI - Zero-Shot API Discovery\n\nZAPI by Adopt AI is an open-source Python library that automatically captures network traffic and API calls from web applications. 
Use it for API discovery, LLM training datasets, advanced API security analysis, and debugging complex browser workflows.\n\n## Highlights\n- Automated Playwright-powered browser sessions that inject auth tokens, capture traffic, export HAR logs, and upload them securely.\n- Built-in HAR filtering that excludes static assets, surfaces API-only entries, and provides upfront cost/time estimates before processing.\n- LangChain integration that converts documented APIs into ready-to-use tools, complete with type-safe schemas and optional custom headers.\n- Bring Your Own Key (BYOK) support for **Anthropic**, **OpenAI**, **Google**, and **Groq**, with AES-256-GCM encryption for every credential.\n- Comprehensive API reference, error handling helpers, and secure credential loading utilities so you can extend ZAPI safely.\n\n## Table of Contents\n- [Requirements & Installation](#requirements--installation)\n- [Environment Setup](#environment-setup)\n- [Project Structure](#project-structure)\n- [Quick Start](#quick-start)\n- [HAR Analysis & Cost Estimation](#har-analysis--cost-estimation)\n- [LangChain Integration](#langchain-integration)\n- [API Reference](#api-reference)\n- [Security & BYOK](#security--byok)\n- [Enhanced Discovery Workflow](#enhanced-discovery-workflow)\n- [Troubleshooting & Tips](#troubleshooting--tips)\n- [Contributing](#contributing)\n\n## Requirements & Installation\n\nZAPI targets **Python 3.9+**, **Playwright 1.40.0+**, and **cryptography 41.0.0+**.\n\n```bash\n# Install dependencies\npip install -r requirements.txt\n\n# Install browser binaries (REQUIRED)\nplaywright install\n```\n\n**Test the installation**\n\n```bash\npython demo.py\n```\n\n## Project Structure\n\n| Path | Purpose |\n|------|---------|\n| `zapi/core.py` | Home of the `ZAPI` class. 
Handles credential loading (`load_zapi_credentials()`), OAuth token exchange, BYOK encryption via `LLMKeyEncryption`, LangChain key propagation, and helper methods like `upload_har()` and `get_documented_apis()`. |\n| `zapi/session.py` | Contains the `BrowserSession` abstraction that wraps Playwright. Manages auth header injection, HAR recording, navigation helpers (`navigate`, `click`, `fill`, `wait_for`), and robust error handling plus synchronous wrappers. |\n| `demo.py` | End-to-end workflow script wired to the modules above. Launches a browser, lets you interact manually, saves the HAR (`session.dump_logs`), runs `analyze_har_file(..., save_filtered=True)`, lets you pick the filtered HAR, and finally calls `ZAPI.upload_har()`. Tweak `DEMO_URL`, `OUTPUT_FILE`, and `HEADLESS_BROWSER` at the top before running. |\n| `examples/langchain/` | LangChain integration docs and demo agent showing how `z.get_zapi_tools()` converts documented APIs into LangChain tools. |\n\nUse this as a map when extending ZAPI or debugging the flow.\n\n## Environment Setup\n\n1. Sign up at [app.adopt.ai](https://app.adopt.ai) to obtain your `ADOPT_CLIENT_ID`, `ADOPT_SECRET_KEY`, and BYOK token credentials before running ZAPI.\n2. Copy the example environment file and add your secrets:\n\n```bash\ncp .devenv .env\n```\n\n3. 
**Set up your environment:**\n   - Create a `.env` file in the root of the project.\n   - Populate it with the required variables:\n     ```env\n     # Required environment variables\n     LLM_API_KEY=your_llm_api_key_here\n     LLM_PROVIDER=anthropic                    # anthropic, openai, google, groq\n     LLM_MODEL_NAME=your_model_name_here      # Use the latest available model for your provider\n     ADOPT_CLIENT_ID=your_client_id_here       # Get from app.adopt.ai\n     ADOPT_SECRET_KEY=your_secret_key_here     # Get from app.adopt.ai\n     YOUR_API_URL=your_api_url_here            # Custom API URL\n     ```\n\nUse `load_llm_credentials()` (provided in the library) to load secrets safely when building custom tooling.\n\n## Quick Start\n\n### Launch, capture, analyze, and upload\n\n```python\nfrom zapi import ZAPI, analyze_har_file\n\n# Initialize ZAPI (automatically loads from .env file)\nz = ZAPI()\n\n# Launch browser and capture traffic\nsession = z.launch_browser(url=\"https://app.example.com/dashboard\")\n\n# Export network logs\nsession.dump_logs(\"session.har\")\n\n# Analyze HAR file before upload (optional but recommended)\nstats, report, _ = analyze_har_file(\"session.har\")\nprint(f\"API entries: {stats.valid_entries}, Estimated cost: ${stats.estimated_cost_usd:.2f}\")\n\n# Upload for enhanced API discovery\nif input(\"Upload? (y/n): \").lower() == 'y':\n    z.upload_har(\"session.har\")\n    print(\"Upload completed!\")\n\nsession.close()\n```\n\n> Prefer `python demo.py` for the full interactive experience. 
The script calls the same primitives shown above but adds guardrails: manual browser driving, HAR filtering, filtered/original upload prompts, and descriptive exception handling for every component (`ZAPI`, `BrowserSession`, HAR processing, networking, etc.).\n\n### LLM key management\n\n```python\nfrom zapi import ZAPI\n\n# Initialize ZAPI (loads configuration from .env)\nz = ZAPI()\n\n# Check configuration\nprint(f\"Provider: {z.get_llm_provider()}\")        # 'anthropic'\nprint(f\"Model: {z.get_llm_model_name()}\")         # Your configured model name\nprint(f\"Has key: {z.has_llm_key()}\")              # True\n\n# Update LLM configuration after initialization\nz.set_llm_key(\"openai\", \"sk-your-openai-key\", \"gpt-4\")\n\n# Access encrypted key (for debugging)\nencrypted_key = z.get_encrypted_llm_key()\ndecrypted_key = z.get_decrypted_llm_key()  # Use carefully\n```\n\n### Error handling example\n\n```python\ntry:\n    z = ZAPI(\n        client_id=\"invalid\",\n        secret=\"invalid\",\n        llm_provider=\"anthropic\",\n        llm_model_name=\"your-model-name\",  # Use the latest available model for your provider\n        llm_api_key=\"invalid-key\"\n    )\nexcept ZAPIAuthenticationError as e:\n    print(f\"Authentication failed: {e}\")\nexcept ZAPIValidationError as e:\n    print(f\"Input validation error: {e}\")\nexcept ZAPINetworkError as e:\n    print(f\"Network error: {e}\")\n```\n\n## HAR Analysis & Cost Estimation\n\nZAPI ships with a HAR analyzer that filters out static assets, surfaces API-only calls, and estimates processing cost/time before you upload.\n\n```python\nfrom zapi import analyze_har_file, HarProcessingError\n\ntry:\n    stats, report, filtered_file = analyze_har_file(\n        \"session.har\",\n        save_filtered=True,                 # Save filtered version with only API entries\n        filtered_output_path=\"api_only.har\" # Optional custom path\n    )\n\n    print(f\"Total entries: {stats.total_entries:,}\")\n    
print(f\"API-relevant entries: {stats.valid_entries:,}\")\n    print(f\"Unique domains: {stats.unique_domains:,}\")\n    print(f\"Estimated cost: ${stats.estimated_cost_usd:.2f}\")\n    print(f\"Estimated time: {stats.estimated_time_minutes:.1f} minutes\")\n\n    print(\"\\nSkipped entries by reason:\")\n    for reason, count in stats.skipped_by_reason.items():\n        if count > 0:\n            print(f\"  {reason.replace('_', ' ').title()}: {count:,}\")\n\n    print(\"\\n\" + report)\n\nexcept HarProcessingError as e:\n    print(f\"HAR analysis failed: {e}\")\n```\n\n## LangChain Integration\n\nZAPI converts documented APIs into LangChain-compatible tools, so your agents can reason over real endpoints immediately.\n\n```python\nfrom langchain.agents import create_agent\nfrom zapi import ZAPI, interactive_chat\n\nz = ZAPI()\nagent = create_agent(\n    z.get_llm_model_name(),\n    z.get_zapi_tools(),  # One-liner to fetch and build all tools\n    system_prompt=\"You are a helpful assistant with access to APIs.\"\n)\n\ninteractive_chat(agent)\n```\n\nRun the interactive demo any time:\n\n```bash\npython examples/langchain/demo.py\n```\n\n**Tool anatomy**\n\n- `z.get_zapi_tools()` returns standard LangChain `Tool` objects (name, description, args schema) built from your documented APIs.\n- Tools automatically display which authentication headers were loaded (values stay hidden for security) so you always know what context the agent has.\n- Execution is routed through ZAPI, letting the agent call your APIs with consistent authentication, logging, and error handling.\n\n**Optional API headers**\n\nCreate `api-headers.json` in the repository root when you need to pass custom auth to all generated tools:\n\n```json\n{\n  \"headers\": {\n    \"Authorization\": \"Bearer your-api-token-here\",\n    \"X-API-Key\": \"your-api-key-here\",\n    \"X-Client-ID\": \"your-client-id-here\"\n  }\n}\n```\n\nShort variants:\n\n**Bearer token**\n```json\n{\n  \"headers\": {\n    
\"Authorization\": \"Bearer sk_live_abc123...\"\n  }\n}\n```\n\n**API key**\n```json\n{\n  \"headers\": {\n    \"X-API-Key\": \"your_api_key_here\",\n    \"X-Client-ID\": \"your_client_id\"\n  }\n}\n```\n\n**Custom headers**\n```json\n{\n  \"headers\": {\n    \"X-Custom-Auth\": \"custom_value\",\n    \"X-Organization\": \"org_123\",\n    \"X-Tenant\": \"tenant_456\"\n  }\n}\n```\n\nZAPI will load the file automatically, hide secret values in logs, and apply the headers to every LangChain tool call. See the dedicated [LangChain Integration Guide](examples/langchain/README.md) for a deeper walkthrough, troubleshooting tips, and additional examples.\n\n## API Reference\n\n### ZAPI class\n\n`ZAPI(client_id, secret, llm_provider, llm_model_name, llm_api_key)`\n\n- `client_id` / `secret`: OAuth credentials from Adopt AI.\n- `llm_provider`: `\"groq\"`, `\"anthropic\"`, `\"openai\"`, or `\"google\"`.\n- `llm_model_name`: Any model identifier your provider supports. Use the latest available model for your provider (e.g., check your provider's documentation for current model names).\n- `llm_api_key`: Provider-specific API key (encrypted immediately per organization context).\n\nKey methods:\n\n- `launch_browser(url, headless=True, **playwright_options)`: Returns a `BrowserSession` that injects auth tokens into every request.\n- `set_llm_key(provider, api_key, model_name)`: Update provider credentials on the fly; keys are encrypted instantly.\n- `get_llm_provider()`, `get_llm_model_name()`, `has_llm_key()`: Inspect the active LLM configuration.\n- `get_encrypted_llm_key()`, `get_decrypted_llm_key()`: Access credential blobs when you must debug (handle decrypted values carefully).\n- `upload_har(filepath)`: Upload a HAR file with metadata for enhanced API discovery.\n- `get_documented_apis(page=1, page_size=10)`: Fetch paginated API documentation from the Adopt AI platform.\n\n### BrowserSession class\n\n| Method | Description |\n|--------|-------------|\n| `navigate(url, 
wait_until=\"networkidle\")` | Navigate to a URL. |\n| `click(selector, **kwargs)` | Click an element with Playwright under the hood. |\n| `fill(selector, value, **kwargs)` | Type into an input or textarea. |\n| `wait_for(selector=None, timeout=None)` | Wait for a selector or a timeout. |\n| `dump_logs(filepath)` | Export HAR traffic for later analysis. |\n| `close()` | Close the browser and clean up resources. |\n\n## Security & BYOK\n\n- ZAPI requires valid BYOK credentials to unlock enhanced discovery; every key is encrypted with **AES-256-GCM** as soon as it is provided.\n- No plaintext keys are stored in memory or logs, and transmission to the Adopt AI discovery service is secured with per-organization isolation.\n- Configure any supported provider by passing `(provider, model_name, api_key)` to `set_llm_key()` or by using the `.env` helpers.\n- `load_llm_credentials()` ensures secrets are loaded from disk without exposing them in code.\n- Providers currently supported: **Anthropic**, **OpenAI**, **Google**, **Groq**.\n\n## Enhanced Discovery Workflow\n\nWhen you bring your own LLM API key, ZAPI unlocks deeper API insights:\n\n**When to use BYOK**\n\n- Building LLM training datasets from API interactions.\n- Generating comprehensive API documentation.\n- Performing advanced API security analysis.\n- Understanding complex application workflows end to end.\n- Creating intelligent API testing scenarios.\n- Budgeting API discovery sessions with upfront estimates.\n\n**Example enhanced workflow**\n\n```python\nfrom zapi import ZAPI, analyze_har_file\n\nz = ZAPI()\n\nsession = z.launch_browser(url=\"https://app.example.com\")\n# ... 
navigate and interact ...\nsession.dump_logs(\"session.har\")\n\nstats, report, _ = analyze_har_file(\"session.har\")\nprint(f\"Found {stats.valid_entries} API entries\")\nprint(f\"Estimated cost: ${stats.estimated_cost_usd:.2f}\")\nprint(f\"Estimated time: {stats.estimated_time_minutes:.1f} minutes\")\n\nz.upload_har(\"session.har\")\nsession.close()\n```\n\n## Troubleshooting & Tips\n\n- If `HarProcessingError` appears, the HAR file is malformed or contains unsupported entries—rerun the capture or inspect the skipped reasons in the report.\n- ZAPI proceeds without authentication headers when `api-headers.json` is missing; add it only when needed and validate the JSON beforehand.\n- Tools will mention which headers were loaded, but the values stay hidden so you can safely confirm configuration without exposing secrets.\n- Always rerun `playwright install` after upgrading browsers or moving to a new machine.\n- Use `get_documented_apis()` to verify connectivity with the Adopt AI backend before launching long capture sessions.\n- Keep `.env` out of version control and rotate your BYOK tokens regularly through [app.adopt.ai](https://app.adopt.ai).\n\n## Contributing\n\nWe welcome contributions from the community! 
Whether you're fixing bugs, adding features, improving documentation, or adding support for new LLM providers, your help is appreciated.\n\n**Get Started:**\n- Read our [Contributing Guide](CONTRIBUTING.md) for development setup, coding standards, and pull request guidelines\n- Check out [open issues](https://github.com/adoptai/zapi/issues) for tasks to work on\n- Join discussions on [GitHub Discussions](https://github.com/adoptai/zapi/discussions)\n\n**Quick Links:**\n- [Development Setup](CONTRIBUTING.md#development-setup)\n- [Project Structure](CONTRIBUTING.md#project-structure)\n- [Adding New LLM Providers](CONTRIBUTING.md#adding-new-llm-providers)\n- [Pull Request Process](CONTRIBUTING.md#pull-request-process)\n\nBy contributing to ZAPI, you agree that your contributions will be licensed under the MIT License.\n"
  },
  {
    "path": "demo.py",
    "content": "#!/usr/bin/env python\n\"\"\"ZAPI Demo Script showing capture, analysis, and upload.\"\"\"\n\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom zapi import (\n    ZAPI,\n    BrowserInitializationError,\n    BrowserNavigationError,\n    BrowserSessionError,\n    HarProcessingError,\n    ZAPIAuthenticationError,\n    ZAPIError,\n    ZAPINetworkError,\n    ZAPIValidationError,\n    analyze_har_file,\n)\n\n# ---------------------------------------------------------------------------\n# Quick configuration – edit these defaults before running the script.\n# ---------------------------------------------------------------------------\nDEMO_URL = \"<INSERT_URL_HERE>\"\nOUTPUT_FILE = Path(\"demo_session.har\")\nHEADLESS_BROWSER = False\n\n\ndef record_session(zapi_client: ZAPI, url: str, output_path: Path) -> None:\n    \"\"\"Record a HAR file by letting the user drive the browser.\"\"\"\n    print(f\"🌐 Launching browser and navigating to: {url}\")\n    session = zapi_client.launch_browser(url=url, headless=HEADLESS_BROWSER)\n    try:\n        print(\"✅ Browser launched successfully!\")\n        input(\"📋 Use the browser freely, then press ENTER to save the HAR...\")\n\n        print(\"💾 Saving session logs...\")\n        session.dump_logs(str(output_path))\n        print(f\"✅ Session saved to: {output_path}\")\n    finally:\n        session.close()\n        print(\"🧹 Browser session closed.\")\n\n\ndef analyze_har_file_with_filter(source_path: Path) -> Optional[Path]:\n    \"\"\"Analyze the HAR and produce a filtered file for API-only calls.\"\"\"\n    print(\"\\n🔍 Analyzing HAR file...\")\n    try:\n        stats, report, filtered_path = analyze_har_file(str(source_path), save_filtered=True)\n    except HarProcessingError as exc:\n        print(f\"⚠️ HAR analysis failed: {exc}\")\n        print(\"   Continuing with the original HAR.\")\n        return None\n\n    print(\"\\n📊 HAR Analysis Results:\")\n    print(f\"   ✅ API-relevant entries: 
{stats.valid_entries:,}\")\n    print(f\"   💰 Estimated cost: ${stats.estimated_cost_usd:.2f}\")\n    print(f\"   ⏱️  Estimated processing time: {round(stats.estimated_time_minutes)} minutes\")\n    if filtered_path:\n        print(f\"   🧹 Filtered HAR saved to: {filtered_path}\")\n    return Path(filtered_path).resolve() if filtered_path else None\n\n\ndef pick_upload_file(original_path: Path, filtered_path: Optional[Path]) -> Path:\n    \"\"\"Interactively choose whether to upload the original or filtered HAR.\"\"\"\n    if filtered_path:\n        print(\"\\nYou now have two files available:\")\n        print(f\"  1. Original HAR : {original_path}\")\n        print(f\"  2. Filtered HAR : {filtered_path}\")\n        choice = input(\"Upload filtered HAR? (Y/n): \").strip().lower()\n        if choice in (\"\", \"y\", \"yes\"):\n            print(\"📤 Using filtered HAR for upload.\")\n            return filtered_path\n        print(\"📤 Using original HAR for upload.\")\n        return original_path\n\n    print(\"\\nFiltered HAR not available, defaulting to the original file.\")\n    return original_path\n\n\ndef main() -> int:\n    print(\"🚀 Starting ZAPI demo...\")\n    url = DEMO_URL\n    output_path = OUTPUT_FILE.expanduser().resolve()\n\n    try:\n        z = ZAPI()\n        record_session(z, url, output_path)\n\n        filtered_path = analyze_har_file_with_filter(output_path)\n        upload_path = pick_upload_file(output_path, filtered_path)\n\n        confirm = input(\"\\n💡 Ready to upload. 
Press ENTER to continue or 'n' to cancel: \").strip().lower()\n        if confirm in {\"n\", \"no\"}:\n            print(\"⏹️ Upload cancelled by user.\")\n            return 0\n\n        print(\"\\n☁️ Uploading HAR file...\")\n        z.upload_har(str(upload_path))\n        print(\"✅ HAR file uploaded successfully!\")\n        print(\"🎉 Demo completed successfully!\")\n\n    except ZAPIValidationError as e:\n        print(\"❌ Configuration Validation Error:\")\n        print(f\"   {str(e)}\")\n        print(\"💡 Please check your input values:\")\n        print(f\"   - URL: '{url}' (should be like 'https://example.com')\")\n        print(f\"   - Output file: '{output_path}' (should end with '.har')\")\n        print(\"   Make sure to replace placeholder values with actual ones.\")\n        return 1\n\n    except ZAPIAuthenticationError as e:\n        print(\"❌ Authentication Error:\")\n        print(f\"   {str(e)}\")\n        print(\"💡 Please check your credentials:\")\n        print(\"   - Make sure your account is active and has proper permissions\")\n        return 1\n\n    except ZAPINetworkError as e:\n        print(\"❌ Network Error:\")\n        print(f\"   {str(e)}\")\n        print(\"💡 This might be due to:\")\n        print(\"   - Internet connectivity issues\")\n        print(\"   - ZAPI service being temporarily unavailable\")\n        print(\"   - Firewall or proxy blocking the connection\")\n        print(\"   - DNS resolution problems\")\n        return 1\n\n    except BrowserNavigationError as e:\n        print(\"❌ Browser Navigation Error:\")\n        print(f\"   {str(e)}\")\n        print(\"💡 Common solutions:\")\n        print(f\"   - Check URL format: '{url}'\")\n        print(\"   - Ensure the website is accessible\")\n        print(\"   - Try a different URL for testing\")\n        print(\"   - Check your internet connection\")\n        return 1\n\n    except BrowserInitializationError as e:\n        print(\"❌ Browser Initialization Error:\")\n 
       print(f\"   {str(e)}\")\n        print(\"💡 This might be due to:\")\n        print(\"   - Missing browser dependencies (try: playwright install)\")\n        print(\"   - System permissions issues\")\n        print(\"   - Insufficient system resources\")\n        return 1\n\n    except BrowserSessionError as e:\n        print(\"❌ Browser Session Error:\")\n        print(f\"   {str(e)}\")\n        print(\"💡 Try the following:\")\n        print(\"   - Restart the script\")\n        print(\"   - Check if the browser window is responsive\")\n        print(\"   - Ensure sufficient disk space for HAR files\")\n        return 1\n\n    except HarProcessingError as e:\n        print(\"❌ HAR Processing Error:\")\n        print(f\"   {str(e)}\")\n        print(\"💡 This error occurred during HAR file analysis:\")\n        print(\"   - Check if the HAR file was generated correctly\")\n        print(\"   - Ensure the file is not corrupted or empty\")\n        print(\"   - Try generating a new session\")\n        return 1\n\n    except ZAPIError as e:\n        print(\"❌ ZAPI Error:\")\n        print(f\"   {str(e)}\")\n        print(\"💡 This is a general ZAPI error. Please check your configuration.\")\n        return 1\n\n    except Exception as e:\n        print(\"❌ Unexpected Error:\")\n        print(f\"   {str(e)}\")\n        print(\"💡 This is an unexpected error. Please:\")\n        print(\"   - Check all your input values\")\n        print(\"   - Try running the script again\")\n        print(\"   - Contact support if the issue persists\")\n        return 1\n\n    return 0\n\n\nif __name__ == \"__main__\":\n    exit(main())\n"
  },
  {
    "path": "docs/introduction.md",
    "content": "# Introducing ZAPI - Zero-Config API Intelligence\n\n**3 min read**\n\n_Automatically discover, capture, and document APIs from any web application_\n\nWe're excited to introduce **ZAPI** - an open-source Python library that automatically captures network traffic and API calls from web applications. Perfect for API discovery, creating LLM training datasets, and understanding how web applications communicate with their backends.\n\nZAPI makes it easy to:\n\n* **Capture network traffic** from any web application automatically\n* **Export HAR files** compatible with Chrome DevTools and other analysis tools\n* **Upload and document APIs** to the adopt.ai platform\n* **Interact with web pages** using simple Python commands\n* **Run headless or visible** browser sessions for debugging\n* **Retrieve documented APIs** with pagination support\n\n## Installation\n\nInstall ZAPI and its dependencies:\n\n```bash\npip install -r requirements.txt\n\n# Install browser binaries (REQUIRED)\nplaywright install\n```\n\n**Requirements:** Python 3.9+, Playwright 1.40.0+\n\n## Quick Start\n\n### 1. Get Your API Credentials\n\nZAPI uses OAuth authentication with the adopt.ai platform and supports LLM integration. You'll need:\n- A `client_id`\n- A `secret` key\n- An LLM `provider` (anthropic, openai, google, or groq)\n- An LLM `api_key` for your chosen provider\n- An LLM `model_name` (use the latest available model for your provider - check your provider's documentation for current model names)\n\n**Getting your client_id and secret:**\nSign up at [app.adopt.ai](https://app.adopt.ai) to get your OAuth credentials.\n\nAdd these to your environment or use them directly in your code.\n\n### 2. 
Your First API Capture\n\nStart ZAPI with just a few lines of code:\n\n```python\nfrom zapi import ZAPI\n\n# Initialize with client credentials and LLM configuration\nz = ZAPI(\n    client_id=\"YOUR_CLIENT_ID\",\n    secret=\"YOUR_SECRET\",\n    llm_provider=\"anthropic\",\n    llm_api_key=\"sk-ant-YOUR_API_KEY\",\n    llm_model_name=\"your-model-name\"  # Use the latest available model for your provider\n)\n\n# Launch browser and capture traffic\nsession = z.launch_browser(url=\"https://app.example.com/dashboard\")\n\n# Export network logs\nsession.dump_logs(\"session.har\")\nsession.close()\n```\n\nThe library will:\n1. Authenticate with the adopt.ai OAuth API\n2. Encrypt your LLM API key for secure tool ingestion\n3. Launch a browser with automatic token injection\n4. Capture all network traffic during your session\n5. Export everything to standard HAR format with encrypted LLM metadata\n\n### 3. Test Your Installation\n\nYou can also load credentials from a `.env` file:\n\n```bash\n# Create .env file with your credentials\necho \"LLM_PROVIDER=anthropic\" >> .env\necho \"LLM_API_KEY=sk-ant-your-key-here\" >> .env\necho \"LLM_MODEL_NAME=your-model-name\" >> .env  # Use the latest available model for your provider\n```\n\nRun the demo script to verify everything works:\n\n```bash\npython demo.py\n```\n\n## LLM Integration & Security\n\n### Supported LLM Providers\n\nZAPI supports 4 main LLM providers with full validation:\n\n- **Anthropic**\n- **OpenAI**\n- **Google**\n- **Groq**\n\n### Secure Key Encryption\n\nAll LLM API keys are encrypted before being used for tool ingestion:\n\n```python\n# Keys are automatically encrypted when ZAPI is initialized\nz = ZAPI(\n    client_id=\"YOUR_CLIENT_ID\",\n    secret=\"YOUR_SECRET\",\n    llm_provider=\"anthropic\",\n    llm_api_key=\"sk-ant-your-key\",  # Encrypted automatically\n    llm_model_name=\"your-model-name\"  # Use the latest available model for your provider\n)\n\n# Check if LLM key is configured\nif 
z.has_llm_key():\n    print(f\"Using provider: {z.get_llm_provider()}\")\n    print(f\"Using model: {z.get_llm_model_name()}\")\n```\n\n## Core Features & Examples\n\n### Uploading to adopt.ai\n\nOnce you've captured traffic, upload it to the adopt.ai platform for automatic API documentation:\n\n```python\nz = ZAPI(\n    client_id=\"YOUR_CLIENT_ID\",\n    secret=\"YOUR_SECRET\",\n    llm_provider=\"anthropic\",\n    llm_api_key=\"sk-ant-YOUR_API_KEY\",\n    llm_model_name=\"your-model-name\"  # Use the latest available model for your provider\n)\n\n# Capture traffic\nsession = z.launch_browser(url=\"https://app.example.com\")\nsession.dump_logs(\"session.har\")\nsession.close()\n\n# Upload for documentation (includes encrypted LLM metadata)\nz.upload_har(\"session.har\")\n```\n\nThe adopt.ai platform will:\n- Parse all API calls from your HAR file\n- Generate documentation automatically\n- Use your encrypted LLM key for enhanced processing\n- Make APIs available for LLM agents and tools\n\n### HAR Analysis & Cost Estimation\n\nBefore uploading, analyze your HAR files to understand what will be processed and estimate costs:\n\n```python\nfrom zapi import analyze_har_file, HarProcessingError\n\ntry:\n    # Analyze HAR file with detailed statistics\n    stats, report, filtered_file = analyze_har_file(\n        \"session.har\",\n        save_filtered=True,           # Save filtered version with only API entries\n        filtered_output_path=\"api_only.har\"  # Optional custom path\n    )\n\n    # Access detailed statistics\n    print(f\"Total entries: {stats.total_entries:,}\")\n    print(f\"API-relevant entries: {stats.valid_entries:,}\")\n    print(f\"Unique domains: {stats.unique_domains:,}\")\n    print(f\"Estimated cost: ${stats.estimated_cost_usd:.2f}\")\n    print(f\"Estimated time: {stats.estimated_time_minutes:.1f} minutes\")\n\n    # Show which entries were filtered out and why\n    print(\"\\nSkipped entries by reason:\")\n    for reason, count in 
stats.skipped_by_reason.items():\n        if count > 0:\n            print(f\"  {reason.replace('_', ' ').title()}: {count:,}\")\n\n    # Print full formatted report\n    print(\"\\n\" + report)\n\nexcept HarProcessingError as e:\n    print(f\"HAR analysis failed: {e}\")\n```\n\n**HAR Processing Features:**\n- **Smart Filtering**: Automatically excludes static assets (JS, CSS, images, fonts)\n- **Cost Estimation**: Provides processing cost estimates\n- **Time Estimation**: Calculates expected processing time\n- **Domain Analysis**: Lists all unique domains found in the session\n- **Skip Reasons**: Detailed breakdown of why entries were filtered out\n- **Filtered Export**: Option to save a clean HAR file with only API-relevant entries\n\n### Retrieving Documented APIs\n\nAfter uploading, retrieve your documented APIs programmatically:\n\n```python\nz = ZAPI(\n    client_id=\"YOUR_CLIENT_ID\",\n    secret=\"YOUR_SECRET\",\n    llm_provider=\"groq\",\n    llm_api_key=\"gsk_YOUR_GROQ_KEY\",\n    llm_model_name=\"mixtral-8x7b-32768\"\n)\n\n# Get first page of documented APIs\napi_list = z.get_documented_apis(page=1, page_size=10)\n\n# Paginate through all APIs\nfor page in range(1, api_list['total_pages'] + 1):\n    apis = z.get_documented_apis(page=page, page_size=10)\n    for api in apis['items']:\n        print(f\"{api['title']}: {api['path']}\")\n```\n\n### Visible Browser Mode for Debugging\n\nWhen developing or debugging, run with a visible browser:\n\n```python\n# See the browser in action\nsession = z.launch_browser(\n    url=\"https://app.example.com\",\n    headless=False  # Makes browser visible\n)\n\n# Great for debugging selectors and interactions\ninput(\"Press ENTER when done navigating...\")\nsession.dump_logs(\"debug_session.har\")\nsession.close()\n```\n\n## Advanced Usage\n\n### Custom Playwright Options\n\nPass any Playwright browser launch options:\n\n```python\nsession = z.launch_browser(\n    url=\"https://app.example.com\",\n    headless=True,\n  
  wait_until=\"networkidle\",  # Wait for network to be idle\n    slow_mo=50,  # Slow down operations by 50ms\n    timeout=30000  # 30 second timeout\n)\n```\n\n## Best Practices\n\n### 1. Use Descriptive HAR Filenames\n\n```python\n# Good - descriptive names\nsession.dump_logs(\"checkout-flow-2024-11-05.har\")\nsession.dump_logs(\"user-authentication-session.har\")\n\n# Less helpful\nsession.dump_logs(\"session1.har\")\nsession.dump_logs(\"test.har\")\n```\n\n### 2. Organize HAR Files by Feature\n\n```\ncaptures/\n├── authentication/\n│   ├── login-flow.har\n│   └── oauth-callback.har\n├── checkout/\n│   ├── cart-operations.har\n│   └── payment-processing.har\n└── admin/\n    └── user-management.har\n```\n\n### 3. Always Close Sessions\n\nUse context managers or explicit `close()` calls to clean up resources:\n\n```python\n# Option 1: Context manager (preferred)\nwith z.launch_browser(url=\"...\") as session:\n    # Your code here\n    pass\n\n# Option 2: Explicit close\nsession = z.launch_browser(url=\"...\")\ntry:\n    # Your code here\n    pass\nfinally:\n    session.close()\n```\n\n### 4. Complete Workflow with Analysis\n\nHere's a complete workflow that includes HAR analysis and cost estimation:\n\n```python\nfrom zapi import ZAPI, load_llm_credentials, analyze_har_file\n\n# Load credentials securely\nllm_provider, llm_api_key, llm_model_name = load_llm_credentials()\n\n# Initialize ZAPI\nz = ZAPI(\n    client_id=\"YOUR_CLIENT_ID\",\n    secret=\"YOUR_SECRET\",\n    llm_provider=llm_provider,\n    llm_api_key=llm_api_key,\n    llm_model_name=llm_model_name\n)\n\n# Capture session\nsession = z.launch_browser(url=\"https://app.example.com\")\n# ... 
navigate and interact ...\nsession.dump_logs(\"session.har\")\nsession.close()\n\n# Analyze before upload with cost estimation\nstats, report, _ = analyze_har_file(\"session.har\")\nprint(f\"Found {stats.valid_entries} API entries\")\nprint(f\"Estimated cost: ${stats.estimated_cost_usd:.2f}\")\nprint(f\"Estimated time: {stats.estimated_time_minutes:.1f} minutes\")\n\n# Upload with confirmation\nif input(\"Upload? (y/n): \").lower() == 'y':\n    z.upload_har(\"session.har\")\n    print(\"Upload completed!\")\n```\n\n## API Reference\n\n### ZAPI Class\n\n**`ZAPI(client_id, secret, llm_provider, llm_model_name, llm_api_key)`**\n- `client_id` (str): OAuth client ID for authentication\n- `secret` (str): OAuth secret key\n- `llm_provider` (str): LLM provider name (\"anthropic\", \"openai\", \"google\", \"groq\")\n- `llm_model_name` (str): LLM model name. Use the latest available model for your provider (check your provider's documentation for current model names)\n- `llm_api_key` (str): LLM API key for the specified provider\n- Raises `ZAPIValidationError` if credentials are empty or LLM key format is invalid\n- Raises `ZAPIAuthenticationError` if authentication fails\n- Raises `ZAPINetworkError` if network requests fail\n\n**`launch_browser(url, headless=True, wait_until=\"load\", **playwright_options)`**\n- Returns: `BrowserSession` instance\n- `url` (str): Initial URL to navigate to\n- `headless` (bool): Run browser in headless mode\n- `wait_until` (str): When navigation is complete (\"load\", \"domcontentloaded\", \"networkidle\")\n\n**`upload_har(har_file)`**\n- Uploads HAR file to adopt.ai for API documentation\n- `har_file` (str): Path to HAR file\n- Includes encrypted LLM metadata if LLM key is configured\n- Returns: JSON response from API\n\n**`set_llm_key(provider, api_key, model_name)`**\n- Update LLM configuration after initialization\n- `provider` (str): LLM provider name\n- `api_key` (str): API key for the provider\n- `model_name` (str): Model name to 
use\n\n**`has_llm_key()`**\n- Returns: True if LLM key is configured, False otherwise\n\n**`get_llm_provider()`**\n- Returns: Configured LLM provider name or None\n\n**`get_llm_model_name()`**\n- Returns: Configured LLM model name or None\n\n**`get_documented_apis(page=1, page_size=10)`**\n- Retrieves documented APIs with pagination\n- `page` (int): Page number (default: 1)\n- `page_size` (int): Items per page (default: 10)\n- Returns: JSON with `items`, `total`, `page`, `page_size`, `total_pages`\n\n### HAR Analysis Functions\n\n**`analyze_har_file(har_file_path, save_filtered=False, filtered_output_path=None)`**\n- Comprehensive HAR file analysis with statistics and filtering\n- `har_file_path` (str): Path to the HAR file to analyze\n- `save_filtered` (bool): Whether to save a filtered HAR file with only API entries\n- `filtered_output_path` (str): Optional path for filtered HAR file (auto-generated if None)\n- Returns: `(HarStats, formatted_report, filtered_file_path)` tuple\n- Automatically excludes static assets and non-API content\n- Provides cost and time estimates for processing\n\n**`load_llm_credentials()`**\n- Load LLM credentials securely from environment variables or configuration\n- Returns: `(provider, api_key, model_name)` tuple\n- Supports .env files and fallback configuration\n\n**`HarProcessor(har_file_path)`**\n- Low-level HAR processing class for custom analysis\n- Methods: `load_and_process()`, `save_filtered_har()`, `get_summary_report()`\n\n### HarStats Object\n\n```python\n@dataclass\nclass HarStats:\n    total_entries: int              # Total entries in HAR file\n    valid_entries: int              # API-relevant entries after filtering\n    skipped_entries: int            # Entries filtered out\n    unique_domains: int             # Number of unique domains\n    estimated_cost_usd: float       # Estimated processing cost\n    estimated_time_minutes: float   # Estimated processing time\n    skipped_by_reason: Dict[str, int]  # Breakdown 
by skip reason\n    domains: List[str]              # List of all domains found\n```\n\n### BrowserSession Class\n\n| Method | Description |\n|--------|-------------|\n| `navigate(url, wait_until=\"networkidle\")` | Navigate to URL |\n| `click(selector, **kwargs)` | Click element by CSS selector |\n| `fill(selector, value, **kwargs)` | Fill form field |\n| `wait_for(selector=None, timeout=None)` | Wait for selector or timeout |\n| `dump_logs(filepath)` | Export HAR file |\n| `close()` | Close browser and cleanup |\n\n## How ZAPI Works\n\nZAPI's workflow is simple but powerful:\n\n1. **Authentication**: Calls the adopt.ai OAuth API to obtain an access token\n2. **LLM Key Encryption**: Encrypts your LLM API key for secure tool ingestion\n3. **Token Injection**: Automatically injects the Bearer token in all request headers\n4. **Traffic Capture**: Records complete network activity during browser interactions\n5. **Smart Analysis**: Filters HAR files to exclude static assets and estimate costs\n6. **Export**: Saves everything to standard HAR format compatible with Chrome DevTools\n7. 
**Documentation**: Uploads to adopt.ai with secured LLM metadata for enhanced API processing\n\n## Use Cases\n\n- **API Discovery**: Reverse-engineer undocumented APIs from web applications\n- **LLM Training Data**: Create datasets of API calls for training language models\n- **Testing & QA**: Capture network traffic for debugging and analysis\n- **Documentation**: Automatically generate API documentation from real usage\n- **Integration Development**: Understand third-party APIs without documentation\n- **Security Research**: Analyze application behavior and API communication patterns\n\n## Get Started Today\n\nInstall ZAPI and start discovering APIs:\n\n```bash\npip install -r requirements.txt\nplaywright install\n\n# Set up your .env file with credentials\necho \"LLM_PROVIDER=anthropic\" >> .env\necho \"LLM_API_KEY=sk-ant-your-key\" >> .env\necho \"LLM_MODEL_NAME=your-model-name\" >> .env  # Use the latest available model for your provider\n\npython demo.py\n```\n\nJoin the community and contribute:\n\n* **GitHub**: https://github.com/adoptai/zapi\n* **adopt.ai Platform**: https://app.adopt.ai\n* **License**: MIT\n"
  },
  {
    "path": "examples/async_usage.py",
    "content": "\"\"\"\nAdvanced async usage example for ZAPI.\n\nThis demonstrates how to use the async API directly for concurrent\noperations or integration with async frameworks.\n\"\"\"\n\nimport asyncio\n\nfrom zapi.session import BrowserSession\n\n\nasync def main():\n    print(\"Advanced async usage example\\n\")\n\n    # Example 1: Using async methods directly\n    print(\"Example 1: Direct async API usage\")\n    session = BrowserSession(auth_token=\"YOUR_TOKEN\", headless=True)\n\n    await session._initialize(initial_url=\"https://app.example.com\")\n    await session._wait_for_async(timeout=2000)\n    await session._dump_logs_async(\"async_example1.har\")\n    await session._close_async()\n    print(\"✓ HAR file saved to async_example1.har\\n\")\n\n    # Example 2: Concurrent sessions (multiple browsers at once)\n    print(\"Example 2: Running multiple sessions concurrently\")\n\n    async def capture_session(url, output_file):\n        \"\"\"Helper to capture a session.\"\"\"\n        session = BrowserSession(auth_token=\"YOUR_TOKEN\", headless=True)\n        await session._initialize(initial_url=url)\n        await session._wait_for_async(timeout=1000)\n        await session._dump_logs_async(output_file)\n        await session._close_async()\n        print(f\"✓ Captured {url} -> {output_file}\")\n\n    # Run multiple sessions concurrently\n    await asyncio.gather(\n        capture_session(\"https://api.example.com/v1/users\", \"async_users.har\"),\n        capture_session(\"https://api.example.com/v1/products\", \"async_products.har\"),\n        capture_session(\"https://api.example.com/v1/orders\", \"async_orders.har\"),\n    )\n    print(\"\\n✓ All concurrent sessions completed\\n\")\n\n    # Example 3: Async context manager\n    print(\"Example 3: Using async context manager\")\n    session = BrowserSession(auth_token=\"YOUR_TOKEN\", headless=True)\n    await session._initialize(initial_url=\"https://app.example.com\")\n\n    async with 
session:\n        await session._navigate_async(\"/dashboard\")\n        await session._wait_for_async(timeout=2000)\n        await session._dump_logs_async(\"async_context.har\")\n    print(\"✓ HAR file saved to async_context.har (auto-cleanup)\\n\")\n\n    print(\"All async examples completed!\")\n\n\nif __name__ == \"__main__\":\n    asyncio.run(main())\n"
  },
  {
    "path": "examples/basic_usage.py",
    "content": "\"\"\"\nBasic usage example for ZAPI.\n\nThis demonstrates the minimal API for launching a browser,\nnavigating to a URL, and capturing network logs in HAR format.\n\"\"\"\n\nfrom zapi import ZAPI\n\n\ndef main():\n    # Example 1: Basic usage\n    print(\"Example 1: Basic ZAPI usage\")\n    z = ZAPI(client_id=\"YOUR_CLIENT_ID\", secret=\"YOUR_SECRET\")\n    session = z.launch_browser(url=\"https://app.example.com/dashboard\")\n\n    # The session is already on the dashboard page\n    # You can interact with it if needed\n    session.wait_for(timeout=2000)  # Wait 2 seconds\n\n    # Dump network logs to HAR file\n    session.dump_logs(\"example1_session.har\")\n    session.close()\n    print(\"✓ HAR file saved to example1_session.har\\n\")\n\n    # Example 2: Multi-page navigation with interactions\n    print(\"Example 2: Multi-page navigation with interactions\")\n    z2 = ZAPI(client_id=\"YOUR_CLIENT_ID\", secret=\"YOUR_SECRET\")\n    session2 = z2.launch_browser(url=\"https://app.example.com\")\n\n    # Navigate to different pages\n    session2.navigate(\"/dashboard\")\n    session2.wait_for(timeout=1000)\n\n    session2.navigate(\"/profile\")\n    session2.wait_for(timeout=1000)\n\n    # Click on an element (example)\n    # session2.click(\"#settings-button\")\n\n    # Fill a form (example)\n    # session2.fill(\"#search-input\", \"test query\")\n\n    session2.dump_logs(\"example2_session.har\")\n    session2.close()\n    print(\"✓ HAR file saved to example2_session.har\\n\")\n\n    # Example 3: Using as context manager (auto-cleanup)\n    print(\"Example 3: Using context manager for automatic cleanup\")\n    z3 = ZAPI(client_id=\"YOUR_CLIENT_ID\", secret=\"YOUR_SECRET\")\n    session3 = z3.launch_browser(url=\"https://app.example.com\")\n\n    with session3:\n        session3.navigate(\"/api-endpoint\")\n        session3.wait_for(timeout=2000)\n        session3.dump_logs(\"example3_session.har\")\n    # Browser automatically closed when 
exiting context\n    print(\"✓ HAR file saved to example3_session.har (auto-cleanup)\\n\")\n\n    print(\"All examples completed! Check the generated .har files.\")\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "examples/langchain/README.md",
    "content": "# ZAPI LangChain Integration\n\nThis example demonstrates how to use ZAPI with LangChain to automatically convert your documented APIs into LangChain tools.\n\n## Quick Start\n\n### 1. Basic Usage (Recommended)\n\n```python\nfrom langchain.agents import create_agent\nfrom zapi import ZAPI, interactive_chat\n\n# Initialize ZAPI and create agent\nz = ZAPI()\n\n# Get ZAPI tools automatically\nagent = create_agent(\n    z.get_llm_model_name(),\n    z.get_zapi_tools(),  # Simple one-liner to get all tools\n    system_prompt=\"You are a helpful assistant with access to APIs.\"\n)\n\n# Start interactive chat\ninteractive_chat(agent)\n```\n\n### 2. Run the Demo\n\n```bash\npython demo.py\n```\n\n## Optional: Custom API Authentication Headers\n\nIf your APIs require custom authentication headers, you can provide them via a JSON file.\n\n### Create API Headers File\n\nCreate a file named `api-headers.json` in the `zapi/` root directory:\n\n```json\n{\n  \"headers\": {\n    \"Authorization\": \"Bearer your-api-token-here\",\n    \"X-API-Key\": \"your-api-key-here\",\n    \"X-Client-ID\": \"your-client-id-here\"\n  }\n}\n```\n\n### Header Examples\n\n**Bearer Token Authentication:**\n```json\n{\n  \"headers\": {\n    \"Authorization\": \"Bearer sk_live_abc123...\"\n  }\n}\n```\n\n**API Key Authentication:**\n```json\n{\n  \"headers\": {\n    \"X-API-Key\": \"your_api_key_here\",\n    \"X-Client-ID\": \"your_client_id\"\n  }\n}\n```\n\n**Custom Headers:**\n```json\n{\n  \"headers\": {\n    \"X-Custom-Auth\": \"custom_value\",\n    \"X-Organization\": \"org_123\",\n    \"X-Tenant\": \"tenant_456\"\n  }\n}\n```\n\n## Usage\n\n```python\nfrom zapi import ZAPI\n\nz = ZAPI()\ntools = z.get_zapi_tools()  # Automatically loads api-headers.json if it exists\n```\n\nThat's it! 
The `get_zapi_tools()` method automatically:\n- Fetches your documented APIs from ZAPI platform\n- Loads authentication headers from `api-headers.json` (if present)\n- Converts APIs into LangChain-compatible tools\n\n## Creating an Agent\n\nZAPI works seamlessly with LangChain's agent framework. Here's the complete flow:\n\n```python\nfrom langchain.agents import create_agent\nfrom zapi import ZAPI, interactive_chat\n\n# 1. Initialize ZAPI\nz = ZAPI()\n\n# 2. Create agent with ZAPI tools\nagent = create_agent(\n    z.get_llm_model_name(),      # Gets the LLM model (use the latest available model for your provider)\n    z.get_zapi_tools(),           # Gets all your documented APIs as tools\n    system_prompt=\"You are a helpful assistant with access to APIs.\"\n)\n\n# 3. Start chatting!\ninteractive_chat(agent)\n```\n\n### What happens here?\n\n- **`z.get_llm_model_name()`**: Returns the LLM model name configured in your ZAPI credentials\n- **`z.get_zapi_tools()`**: Fetches and converts your APIs into LangChain tools\n- **`create_agent()`**: Creates a LangChain agent that can use your APIs\n- **`interactive_chat()`**: Starts an interactive terminal chat session with the agent\n\nThe agent will automatically:\n- Understand when to call your APIs based on user queries\n- Extract parameters from natural language\n- Execute API calls through ZAPI\n- Present results in a conversational format\n\n## Security Notes\n\n- **Never commit your actual API keys to version control**\n- Add `api-headers.json` to your `.gitignore` file\n- Use environment-specific headers files for different environments\n- The tool will show which headers are loaded but won't display their values for security\n\n## What ZAPI Does\n\n1. **Fetches Documented APIs**: Retrieves all APIs you've documented in ZAPI platform\n2. **Converts to LangChain Tools**: Automatically creates LangChain tools with proper schemas\n3. 
**Handles Authentication**: Applies custom headers (if provided) to all API requests\n4. **Executes API Calls**: Routes tool calls through ZAPI backend for execution\n\n## Features\n\n- ✅ **Zero-config**: Works out of the box with `z.get_zapi_tools()`\n- ✅ **Type-safe**: Automatically generates proper parameter schemas\n- ✅ **Flexible auth**: Supports custom headers via JSON file\n- ✅ **Error handling**: Gracefully handles API failures\n- ✅ **Interactive chat**: Built-in `interactive_chat()` utility\n\n## File Structure\n\n```\nzapi/\n├── api-headers.json        # Optional: Your API headers (don't commit this!)\n├── examples/\n│   └── langchain/\n│       ├── demo.py         # Demo script\n│       └── README.md       # This file\n└── ...\n```\n\n## Troubleshooting\n\n- If no headers file is found, the tool will proceed without authentication headers\n- Check the console output for confirmation that headers were loaded\n- Ensure your JSON file is valid (use a JSON validator if needed)\n- Make sure you have documented APIs in your ZAPI platform account\n"
  },
  {
    "path": "examples/langchain/__init__.py",
    "content": "\"\"\"\nZAPI Langchain Examples\n\nThis package contains comprehensive examples showing how to use ZAPI\nwith Langchain to create intelligent agents.\n\nExamples:\n- demo.py: Agent creation and usage demonstration\n\"\"\"\n"
  },
  {
    "path": "examples/langchain/demo.py",
    "content": "from langchain.agents import create_agent\nfrom zapi import ZAPI, interactive_chat\n\n\ndef demo_zapi_langchain():\n    \"\"\"ZAPI LangChain integration demo.\"\"\"\n    print(\"\\n🚀 ZAPI LangChain - Demo Example\")\n    print(\"=\" * 40)\n\n    # Initialize ZAPI and create agent\n    z = ZAPI()\n\n    agent = create_agent(\n        z.get_llm_model_name(), z.get_zapi_tools(), system_prompt=\"You are a helpful assistant with access to APIs.\"\n    )\n\n    # Start interactive chat\n    interactive_chat(agent, debug_mode=False)\n\n\n# Run the demo\ndemo_zapi_langchain()\n"
  },
  {
    "path": "examples/llm_keys_usage.py",
    "content": "\"\"\"\nExample demonstrating LLM API key management with ZAPI.\n\nThis shows how to securely provide LLM API keys for the 4 main supported providers.\nKeys will be encrypted and transmitted to the adopt.ai discovery service.\n\nSupported providers: Anthropic, OpenAI, Google, Groq\n\"\"\"\n\nfrom zapi import ZAPI, LLMProvider\n\n\ndef main():\n    # Example 1: Initialize ZAPI with single LLM key in constructor (Anthropic primary)\n    print(\"Example 1: ZAPI with single LLM key in constructor (Anthropic primary)\")\n\n    # Single key approach - one provider per client instance\n    z = ZAPI(\n        client_id=\"YOUR_CLIENT_ID\",\n        secret=\"YOUR_SECRET\",\n        llm_provider=\"anthropic\",  # Primary supported provider\n        llm_api_key=\"sk-ant-your-anthropic-key-here\",\n    )\n\n    print(f\"Configured provider: {z.get_llm_provider()}\")\n    print(f\"Has LLM key: {z.has_llm_key()}\")\n\n    # Launch browser and capture session\n    session = z.launch_browser(url=\"https://app.example.com\", headless=False)\n    input(\"Navigate around the app, then press ENTER to continue...\")\n\n    # Export HAR with encrypted LLM key\n    session.dump_logs(\"example_with_key.har\")\n\n    # Upload to adopt.ai with encrypted key\n    z.upload_har(\"example_with_key.har\")\n\n    session.close()\n    print(\"✓ Session completed with encrypted LLM key included\\n\")\n\n    # Example 2: Set LLM key after initialization\n    print(\"Example 2: Setting LLM key after initialization\")\n\n    z2 = ZAPI(client_id=\"YOUR_CLIENT_ID\", secret=\"YOUR_SECRET\")\n    print(f\"Initially has key: {z2.has_llm_key()}\")\n\n    # Add key later - showcasing one of the 4 main providers\n    z2.set_llm_key(\"anthropic\", \"sk-ant-another-key-here\")\n\n    print(f\"After setting key: {z2.has_llm_key()}\")\n    print(f\"Configured provider: {z2.get_llm_provider()}\")\n\n    # Example 3: Multiple provider support (single provider per client)\n    print(\"\\nExample 3: 
Using different providers (create separate clients)\")\n\n    # OpenAI example\n    z3a = ZAPI(client_id=\"YOUR_CLIENT_ID\", secret=\"YOUR_SECRET\")\n    z3a.set_llm_key(\"openai\", \"sk-your-openai-key-here\")\n    print(f\"OpenAI client provider: {z3a.get_llm_provider()}\")\n\n    # Groq example\n    z3b = ZAPI(client_id=\"YOUR_CLIENT_ID\", secret=\"YOUR_SECRET\")\n    z3b.set_llm_key(\"groq\", \"gsk_your-groq-key-here\")\n    print(f\"Groq client provider: {z3b.get_llm_provider()}\")\n\n    # Google example\n    z3c = ZAPI(client_id=\"YOUR_CLIENT_ID\", secret=\"YOUR_SECRET\")\n    z3c.set_llm_key(\"google\", \"your-google-api-key-here\")\n    print(f\"Google client provider: {z3c.get_llm_provider()}\")\n\n    # Example 4: Working without LLM keys (backward compatibility)\n    print(\"\\nExample 4: Working without LLM keys (backward compatibility)\")\n\n    z4 = ZAPI(client_id=\"YOUR_CLIENT_ID\", secret=\"YOUR_SECRET\")\n    print(f\"Has LLM key: {z4.has_llm_key()}\")\n\n    # This will work exactly as before - no encrypted keys sent\n    session4 = z4.launch_browser(url=\"https://app.example.com\")\n    session4.wait_for(timeout=1000)\n    session4.dump_logs(\"example_no_keys.har\")\n    z4.upload_har(\"example_no_keys.har\")  # byok_enabled: false\n    session4.close()\n    print(\"✓ Session completed without LLM keys (legacy mode)\")\n\n    # Example 5: Show all 4 supported providers\n    print(\"\\nExample 5: All 4 main supported LLM providers\")\n    print(f\"All supported providers: {list(LLMProvider.get_all_providers())}\")\n\n    from zapi.providers import get_supported_providers_info, is_primary_provider\n\n    providers_info = get_supported_providers_info()\n    for provider_name, info in providers_info.items():\n        support_level = \"🔥 PRIMARY\" if is_primary_provider(provider_name) else \"⭐ MAIN\"\n        print(f\"- {info['display_name']}: {support_level} - {info['description']}\")\n\n    print(\"\\n💡 ZAPI supports 4 main providers: Anthropic, 
OpenAI, Google, Groq\")\n    print(\"   Each client handles one provider's key for security and simplicity.\")\n    print(\"   All providers have complete validation and optimized integration.\")\n\n    # Example 6: Demonstrating API key format validation\n    print(\"\\nExample 6: API key format validation for each provider\")\n\n    key_examples = {\n        \"anthropic\": \"sk-ant-api03-example-key-here\",\n        \"openai\": \"sk-your-openai-key-here\",\n        \"groq\": \"gsk_your-groq-key-here\",\n        \"google\": \"your-google-api-key-here\",\n    }\n\n    for provider, example_key in key_examples.items():\n        print(f\"- {provider.title()}: {example_key[:15]}...\")\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "examples/simple_usage.py",
    "content": "\"\"\"\nSimplest possible ZAPI usage - exactly as shown in documentation.\n\"\"\"\n\nfrom zapi import ZAPI\n\n\ndef main():\n    # Create ZAPI instance with your client credentials\n    z = ZAPI(client_id=\"YOUR_CLIENT_ID\", secret=\"YOUR_SECRET\")\n\n    # Launch browser and navigate to URL\n    session = z.launch_browser(url=\"https://app.example.com/dashboard\")\n\n    # Dump network logs to HAR file\n    session.dump_logs(\"session.har\")\n\n    # Clean up\n    session.close()\n\n    print(\"✓ Network logs saved to session.har\")\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nrequires = [\"setuptools>=61.0\", \"wheel\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\nname = \"zapi\"\nversion = \"0.1.0\"\ndescription = \"Zero-Config API Intelligence - automatically discover, understand, and prepare APIs for LLM and agent workflows\"\nreadme = \"README.md\"\nrequires-python = \">=3.9\"\nlicense = {text = \"MIT\"}\nauthors = [\n    {name = \"ZAPI Contributors\"}\n]\nkeywords = [\"api\", \"llm\", \"automation\", \"browser\", \"network\", \"har\"]\nclassifiers = [\n    \"Development Status :: 3 - Alpha\",\n    \"Intended Audience :: Developers\",\n    \"License :: OSI Approved :: MIT License\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3.9\",\n    \"Programming Language :: Python :: 3.10\",\n    \"Programming Language :: Python :: 3.11\",\n    \"Programming Language :: Python :: 3.12\",\n]\ndependencies = [\n    \"playwright>=1.40.0\",\n    \"cryptography>=41.0.0\",\n    \"httpx>=0.25.0\",\n    \"pydantic>=2.0.0\",\n    \"python-dotenv>=1.0.0\",\n    \"langchain>=1.0.0\",\n    \"langchain-anthropic>=1.0.0\",\n    \"langchain-openai>=1.0.0\",\n    \"click>=8.0.0\",\n]\n\n[project.urls]\nHomepage = \"https://github.com/adoptai/zapi\"\nRepository = \"https://github.com/adoptai/zapi\"\n\n[project.scripts]\nzapi = \"zapi.cli:cli\"\n\n[tool.setuptools.packages.find]\nwhere = [\".\"]\n\n\ninclude = [\"zapi*\"]\n\n[tool.ruff]\n# Set the maximum line length\nline-length = 120\n\n# Target Python 3.9+\ntarget-version = \"py39\"\n\n# Exclude common directories\nexclude = [\n    \".git\",\n    \".github\",\n    \".venv\",\n    \"venv\",\n    \"__pycache__\",\n    \"*.egg-info\",\n    \"build\",\n    \"dist\",\n    \"docs\",\n    \".pytest_cache\",\n    \".ruff_cache\",\n]\n\n[tool.ruff.lint]\n# Enable specific rule sets\nselect = [\n    \"E\",   # pycodestyle errors\n    \"W\",   # pycodestyle warnings\n    \"F\",   # pyflakes\n    \"I\",   # isort\n    \"N\",   # 
pep8-naming\n    \"UP\",  # pyupgrade\n    \"B\",   # flake8-bugbear\n    \"C4\",  # flake8-comprehensions\n    \"SIM\", # flake8-simplify\n]\n\n# Ignore specific rules\nignore = [\n    \"E501\",  # Line too long (handled by formatter)\n    \"B008\",  # Do not perform function calls in argument defaults\n    \"B905\",  # zip() without an explicit strict= parameter\n    \"B904\",  # Within except clause, raise with from err - too strict for this codebase\n    \"SIM105\",  # Use contextlib.suppress() instead of try-except-pass - we prefer explicit try-except for clarity\n]\n\n# Allow autofix for all enabled rules\nfixable = [\"ALL\"]\nunfixable = []\n\n[tool.ruff.format]\n# Use double quotes for strings\nquote-style = \"double\"\n\n# Indent with spaces\nindent-style = \"space\"\n\n# Use Unix-style line endings\nline-ending = \"auto\"\n\n[tool.ruff.lint.isort]\nknown-first-party = [\"zapi\"]\n"
  },
  {
    "path": "requirements.txt",
    "content": "playwright>=1.40.0\nrequests>=2.31.0\ncryptography>=41.0.0\nhttpx>=0.25.0\npydantic>=2.0.0\npython-dotenv>=1.0.0\nlangchain>=1.0.0\nlangchain-anthropic>=1.0.0\nlangchain-openai>=1.0.0\nclick>=8.0.0\n\n# Development dependencies\nruff>=0.6.0\npre-commit>=3.0.0\n\n"
  },
  {
    "path": "scripts/README.md",
    "content": "# ZAPI Scripts\n\nUtility scripts for ZAPI development and maintenance.\n\n## Pre-commit Script\n\n**File:** `pre-commit.sh`\n\nRuns Ruff linting and formatting checks before allowing a commit.\n\n### Usage\n\n```bash\n# Make it executable (one-time)\nchmod +x scripts/pre-commit.sh\n\n# Run manually\n./scripts/pre-commit.sh\n```\n\n### What it checks\n\n- ✅ Ruff linting (with auto-fix suggestions)\n- ✅ Ruff formatting (with format suggestions)\n- ❌ Exits with error if checks fail\n\n### Alternative: Use pre-commit hooks\n\nFor automatic checks on every commit:\n\n```bash\npip install pre-commit\npre-commit install\n```\n\nThis uses `.pre-commit-config.yaml` and runs automatically on `git commit`.\n"
  },
  {
    "path": "scripts/pre-commit.sh",
    "content": "#!/bin/bash\n# Pre-commit script for ZAPI\n# This script runs Ruff linting and formatting checks before allowing a commit\n\nset -e  # Exit on error\n\necho \"🔍 Running pre-commit checks...\"\necho \"\"\n\n# Colors for output\nRED='\\033[0;31m'\nGREEN='\\033[0;32m'\nYELLOW='\\033[1;33m'\nNC='\\033[0m' # No Color\n\n# Check if ruff is installed\nif ! command -v ruff &> /dev/null; then\n    echo -e \"${RED}❌ Ruff is not installed!${NC}\"\n    echo \"Install it with: pip install ruff\"\n    exit 1\nfi\n\n# Run Ruff linter\necho \"📝 Running Ruff linter...\"\nif ruff check .; then\n    echo -e \"${GREEN}✅ Linting passed!${NC}\"\nelse\n    echo -e \"${RED}❌ Linting failed!${NC}\"\n    echo \"\"\n    echo \"Run 'ruff check . --fix' to auto-fix issues\"\n    exit 1\nfi\n\necho \"\"\n\n# Run Ruff formatter check\necho \"🎨 Checking code formatting...\"\nif ruff format --check .; then\n    echo -e \"${GREEN}✅ Formatting check passed!${NC}\"\nelse\n    echo -e \"${RED}❌ Code is not formatted correctly!${NC}\"\n    echo \"\"\n    echo \"Run 'ruff format .' to format your code\"\n    exit 1\nfi\n\necho \"\"\necho -e \"${GREEN}✨ All pre-commit checks passed! Ready to commit.${NC}\"\n"
  },
  {
    "path": "setup.py",
    "content": "\"\"\"\nSetup script for ZAPI - maintained for backwards compatibility.\nPrefer using pyproject.toml for modern Python packaging.\n\"\"\"\n\nfrom setuptools import find_packages, setup\n\nwith open(\"README.md\", encoding=\"utf-8\") as fh:\n    long_description = fh.read()\n\nsetup(\n    name=\"zapi\",\n    version=\"0.1.0\",\n    author=\"ZAPI Contributors\",\n    description=\"Zero-Config API Intelligence - automatically discover, understand, and prepare APIs for LLM and agent workflows\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/adoptai/zapi\",\n    packages=find_packages(),\n    classifiers=[\n        \"Development Status :: 3 - Alpha\",\n        \"Intended Audience :: Developers\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Programming Language :: Python :: 3\",\n        \"Programming Language :: Python :: 3.9\",\n        \"Programming Language :: Python :: 3.10\",\n        \"Programming Language :: Python :: 3.11\",\n        \"Programming Language :: Python :: 3.12\",\n    ],\n    python_requires=\">=3.9\",\n    install_requires=[\n        \"playwright>=1.40.0\",\n    ],\n    keywords=\"api llm automation browser network har\",\n)\n"
  },
  {
    "path": "zapi/__init__.py",
    "content": "\"\"\"\nZAPI - Zero-Config API Intelligence\n\nAn open-source library that automatically discovers, understands,\nand prepares APIs for LLM and agent workflows.\n\"\"\"\n\nfrom .auth import AuthMode\nfrom .constants import BASE_URL\nfrom .core import ZAPI\nfrom .encryption import LLMKeyEncryption\nfrom .exceptions import ZAPIAuthenticationError, ZAPIError, ZAPINetworkError, ZAPIValidationError\nfrom .har_processing import (\n    HarProcessingError,\n    HarProcessor,\n    HarStats,\n    analyze_har_file,\n)\nfrom .providers import LLMProvider\nfrom .session import BrowserInitializationError, BrowserNavigationError, BrowserSession, BrowserSessionError\nfrom .utils import (\n    interactive_chat,\n    load_llm_credentials,\n)\n\n__version__ = \"0.1.0\"\n__all__ = [\n    \"ZAPI\",\n    \"BrowserSession\",\n    \"AuthMode\",\n    \"LLMProvider\",\n    \"LLMKeyEncryption\",\n    \"load_llm_credentials\",\n    # HAR processing\n    \"HarProcessor\",\n    \"HarStats\",\n    \"analyze_har_file\",\n    \"interactive_chat\",\n    # Exception classes\n    \"ZAPIError\",\n    \"ZAPIAuthenticationError\",\n    \"ZAPIValidationError\",\n    \"ZAPINetworkError\",\n    \"BrowserSessionError\",\n    \"BrowserNavigationError\",\n    \"BrowserInitializationError\",\n    \"HarProcessingError\",\n    \"BASE_URL\",\n]\n"
  },
  {
    "path": "zapi/auth.py",
    "content": "\"\"\"Authentication handlers for different auth modes.\"\"\"\n\nfrom typing import Literal\n\nfrom playwright.async_api import BrowserContext, Page\n\nfrom .exceptions import AuthError\n\nAuthMode = Literal[\"localStorage\", \"cookie\", \"header\"]\n\n\nasync def apply_localstorage_auth(page: Page, token: str, key: str = \"authToken\") -> None:\n    \"\"\"\n    Inject authentication token into localStorage.\n\n    Args:\n        page: Playwright page instance\n        token: Authentication token\n        key: localStorage key name (default: \"authToken\")\n    \"\"\"\n    await page.evaluate(f\"localStorage.setItem('{key}', '{token}')\")\n\n\nasync def apply_cookie_auth(page: Page, token: str, name: str = \"authToken\", domain: str = None) -> None:\n    \"\"\"\n    Set authentication token as a cookie.\n\n    Args:\n        page: Playwright page instance\n        token: Authentication token\n        name: Cookie name (default: \"authToken\")\n        domain: Cookie domain (optional)\n    \"\"\"\n    cookie = {\n        \"name\": name,\n        \"value\": token,\n        \"path\": \"/\",\n    }\n    if domain:\n        cookie[\"domain\"] = domain\n\n    await page.context.add_cookies([cookie])\n\n\nasync def apply_header_auth(context: BrowserContext, token: str) -> None:\n    \"\"\"\n    Add Authorization header to all requests.\n\n    Args:\n        context: Playwright browser context\n        token: Authentication token (will be added as \"Bearer <token>\")\n    \"\"\"\n    await context.set_extra_http_headers({\"Authorization\": f\"Bearer {token}\"})\n\n\ndef get_auth_handler(auth_mode: AuthMode):\n    \"\"\"\n    Factory function to get the appropriate auth handler.\n\n    Args:\n        auth_mode: Authentication mode (\"localStorage\", \"cookie\", or \"header\")\n\n    Returns:\n        Corresponding auth handler function\n\n    Raises:\n        AuthError: If auth_mode is not recognized\n    \"\"\"\n    handlers = {\n        \"localStorage\": 
apply_localstorage_auth,\n        \"cookie\": apply_cookie_auth,\n        \"header\": apply_header_auth,\n    }\n\n    if auth_mode not in handlers:\n        raise AuthError(f\"Invalid auth_mode: {auth_mode}. Must be one of: {', '.join(handlers.keys())}\")\n\n    return handlers[auth_mode]\n"
  },
  {
    "path": "zapi/cli.py",
    "content": "\"\"\"Command-line interface for ZAPI.\"\"\"\n\nimport time\nfrom pathlib import Path\n\nimport click\n\nfrom .core import ZAPI\nfrom .har_processing import analyze_har_file\n\n\n@click.group()\ndef cli():\n    \"\"\"ZAPI command-line tool.\"\"\"\n    pass\n\n\n@cli.command()\n@click.argument(\"url\")\n@click.option(\"--output\", default=\"session.har\", help=\"Output HAR file path.\")\n@click.option(\"--headless/--no-headless\", default=False, help=\"Run browser in headless mode.\")\ndef capture(url, output, headless):\n    \"\"\"Capture a browser session to a HAR file.\"\"\"\n    zapi_client = ZAPI()\n    output_path = Path(output)\n\n    click.echo(f\"🌐 Launching browser to capture: {url}\")\n    session = zapi_client.launch_browser(url=url, headless=headless)\n\n    try:\n        if not headless:\n            click.echo(\"📋 Use the browser freely, then press ENTER to save the HAR...\")\n            input()\n        else:\n            click.echo(\"Running in headless mode. 
The script will automatically close the session.\")\n            # In a real-world headless scenario, you might add some automated actions here.\n            # For now, we'll just wait for a moment.\n            time.sleep(10)  # Wait 10 seconds\n\n        click.echo(\"💾 Saving session logs...\")\n        session.dump_logs(str(output_path))\n        click.echo(f\"✅ Session saved to: {output_path}\")\n    finally:\n        session.close()\n        click.echo(\"🧹 Browser session closed.\")\n\n\n@cli.command()\n@click.argument(\"har_file\", type=click.Path(exists=True))\ndef analyze(har_file):\n    \"\"\"Analyze a HAR file.\"\"\"\n    click.echo(f\"🔍 Analyzing HAR file: {har_file}\")\n    stats, report, filtered_path = analyze_har_file(har_file, save_filtered=True)\n\n    click.echo(\"\\n📊 HAR Analysis Results:\")\n    click.echo(f\"   ✅ API-relevant entries: {stats.valid_entries:,}\")\n    click.echo(f\"   💰 Estimated cost: ${stats.estimated_cost_usd:.2f}\")\n    click.echo(f\"   ⏱️  Estimated processing time: {round(stats.estimated_time_minutes)} minutes\")\n    if filtered_path:\n        click.echo(f\"   🧹 Filtered HAR saved to: {filtered_path}\")\n\n\n@cli.command()\n@click.argument(\"har_file\", type=click.Path(exists=True))\ndef upload(har_file):\n    \"\"\"Upload a HAR file to ZAPI.\"\"\"\n    zapi_client = ZAPI()\n    click.echo(f\"☁️ Uploading HAR file: {har_file}\")\n    zapi_client.upload_har(har_file)\n    click.echo(\"✅ HAR file uploaded successfully!\")\n\n\nif __name__ == \"__main__\":\n    cli()\n"
  },
  {
    "path": "zapi/constants.py",
    "content": "BASE_URL = \"https://connect.adopt.ai\"\n"
  },
  {
    "path": "zapi/core.py",
    "content": "\"\"\"Core ZAPI class implementation.\"\"\"\n\nimport asyncio\nimport json\nfrom typing import Callable, Optional\n\nimport httpx\nimport requests\n\nfrom .constants import BASE_URL\nfrom .encryption import LLMKeyEncryption\nfrom .exceptions import (\n    AuthError,\n    LLMKeyError,\n    NetworkError,\n    ZAPIError,\n    ZAPINetworkError,\n    ZAPIValidationError,\n)\nfrom .providers import validate_llm_keys\nfrom .session import BrowserSession\nfrom .utils import load_zapi_credentials, set_llm_api_key_env\n\n\nclass ZAPI:\n    \"\"\"\n    Zero-Config API Intelligence main class.\n\n    This class provides a simple interface to launch browser sessions,\n    capture network traffic, and export HAR files for API discovery.\n    \"\"\"\n\n    def __init__(\n        self,\n        client_id: Optional[str] = None,\n        secret: Optional[str] = None,\n        llm_provider: Optional[str] = None,\n        llm_model_name: Optional[str] = None,\n        llm_api_key: Optional[str] = None,\n    ):\n        \"\"\"\n        Initialize ZAPI instance.\n\n        Args:\n            client_id: Client ID for authentication. If None, loads from ADOPT_CLIENT_ID env var.\n            secret: Secret key for authentication. If None, loads from ADOPT_SECRET_KEY env var.\n            llm_provider: LLM provider name (e.g., \"anthropic\"). If None, loads from LLM_PROVIDER env var.\n            llm_model_name: LLM model name (e.g., \"claude-3-5-sonnet-20241022\"). If None, loads from LLM_MODEL_NAME env var.\n            llm_api_key: LLM API key for the specified provider. 
If None, loads from LLM_API_KEY env var.\n\n        Raises:\n            ValueError: If client_id or secret is empty, or LLM key format is invalid\n            RuntimeError: If token fetch fails\n        \"\"\"\n        # Auto-load credentials from environment if not provided\n        if client_id is None or secret is None or llm_provider is None or llm_model_name is None or llm_api_key is None:\n            env_client_id, env_secret, env_llm_provider, env_llm_model_name, env_llm_api_key = load_zapi_credentials()\n\n            # Use provided values or fallback to environment values\n            client_id = client_id or env_client_id\n            secret = secret or env_secret\n            llm_provider = llm_provider or env_llm_provider\n            llm_model_name = llm_model_name or env_llm_model_name\n            llm_api_key = llm_api_key or env_llm_api_key\n\n        if not client_id or not client_id.strip():\n            raise ZAPIValidationError(\"client_id cannot be empty\")\n        if not secret or not secret.strip():\n            raise ZAPIValidationError(\"secret cannot be empty\")\n\n        self.client_id = client_id\n        self.secret = secret\n\n        # Fetch auth token and extract org_id\n        self.auth_token, self.org_id, self.email = self._fetch_auth_token()\n\n        # Initialize encryption handler\n        self._key_encryptor = LLMKeyEncryption(self.org_id)\n\n        # Validate and encrypt LLM key if provided\n        self._encrypted_llm_key: str = \"\"\n        self._llm_provider: str = llm_provider\n        self._llm_model_name: str = llm_model_name\n        self.set_llm_key(llm_provider, llm_api_key, llm_model_name)\n\n        # Automatically set LLM API key in environment for LangChain compatibility\n        if self._llm_provider and self._encrypted_llm_key:\n            try:\n                set_llm_api_key_env(self._llm_provider, self.get_decrypted_llm_key())\n            except Exception:\n                # Silently fail if 
LangChain integration is not available\n                pass\n\n    def _fetch_auth_token(self) -> tuple[str, str, str]:\n        \"\"\"\n        Fetch authentication token from adopt.ai API and extract org_id.\n\n        Returns:\n            Tuple of (authentication_token, org_id, email)\n\n        Raises:\n            AuthError: If credentials are rejected (HTTP 401/403)\n            NetworkError: If the request times out or the service is unreachable\n            RuntimeError: If the token response has an unexpected format\n        \"\"\"\n        url = f\"{BASE_URL}/v1/auth/token\"\n        payload = {\"clientId\": self.client_id, \"secret\": self.secret}\n        headers = {\"accept\": \"application/json\", \"Content-Type\": \"application/json\"}\n\n        try:\n            response = requests.post(url, json=payload, headers=headers)\n            response.raise_for_status()\n            data = response.json()\n\n            # Extract token from response\n            if \"token\" in data:\n                token = data[\"token\"]\n            elif \"access_token\" in data:\n                token = data[\"access_token\"]\n            else:\n                raise RuntimeError(f\"Unexpected response format: {data}\")\n\n            # Validate token and extract org_id via backend API\n            try:\n                loop = asyncio.get_event_loop()\n            except RuntimeError:\n                loop = asyncio.new_event_loop()\n                asyncio.set_event_loop(loop)\n\n            org_id, email = loop.run_until_complete(self._validate_token_and_extract_org_id(token))\n\n            return token, org_id, email\n\n        except requests.exceptions.Timeout:\n            raise NetworkError(\"Authentication request timed out. Please check your internet connection.\")\n        except requests.exceptions.ConnectionError:\n            raise NetworkError(\n                \"Cannot connect to adopt.ai authentication service. 
Please check your internet connection.\"\n            )\n        except requests.exceptions.HTTPError as e:\n            if e.response.status_code == 401:\n                error_message = (\n                    \"Authentication Error: Invalid credentials\\n\\n\"\n                    \"Your ADOPT_CLIENT_ID or ADOPT_SECRET_KEY appears to be incorrect.\\n\\n\"\n                    \"Please check:\\n\"\n                    \"1. Your .env file has the correct credentials\\n\"\n                    \"2. Get valid credentials from https://app.adopt.ai\\n\"\n                    \"3. Ensure no extra spaces in your .env file\\n\\n\"\n                    \"Need help? See: https://docs.zapi.ai/authentication\"\n                )\n                raise AuthError(error_message)\n            elif e.response.status_code == 403:\n                raise AuthError(\"Access forbidden. Please check your account permissions.\")\n            else:\n                raise AuthError(f\"Authentication failed: HTTP {e.response.status_code}\")\n        except requests.exceptions.RequestException as e:\n            raise NetworkError(f\"Failed to fetch authentication token: {e}\")\n\n    async def _validate_token_and_extract_org_id(self, token: str) -> tuple[str, str]:\n        \"\"\"\n        Validate JWT token via backend API and extract org_id.\n\n        Args:\n            token: JWT token string\n\n        Returns:\n            Tuple of (org_id, user_email) extracted from the validated token\n\n        Raises:\n            AuthError: If the token is invalid, expired, or forbidden\n            NetworkError: If the validation request fails or times out\n            ZAPIError: If any other validation error occurs\n        \"\"\"\n        # Use adopt.ai backend API for token validation\n        async with httpx.AsyncClient() as client:\n            try:\n                response = await client.post(\n                    f\"{BASE_URL}/v1/auth/validate-token\",\n                    headers={\"Authorization\": f\"Bearer {token}\", \"Content-Type\": \"application/json\"},\n                )\n                
response.raise_for_status()\n\n                validation_result = response.json()\n\n                # API returns org_id and user_email directly on success\n                org_id = validation_result.get(\"org_id\")\n                email = validation_result.get(\"user_email\", \"\")\n                if not org_id or not isinstance(org_id, str):\n                    raise RuntimeError(\"Invalid org_id in validation response\")\n\n                print(f\"Org ID: {org_id}\")\n                print(f\"Email: {email}\")\n\n                return org_id, email\n\n            except httpx.HTTPStatusError as e:\n                if e.response.status_code == 401:\n                    raise AuthError(\"Token validation failed: Invalid or expired token\")\n                elif e.response.status_code == 403:\n                    raise AuthError(\"Token validation failed: Access forbidden\")\n                else:\n                    raise NetworkError(f\"Backend token validation failed: HTTP {e.response.status_code}\")\n            except httpx.ConnectTimeout:\n                raise NetworkError(\"Token validation timed out. 
Please check your internet connection.\")\n            except httpx.RequestError as e:\n                raise NetworkError(f\"Token validation request failed: {e}\")\n            except Exception as e:\n                raise ZAPIError(f\"Token validation error: {e}\")\n\n    def set_llm_key(self, provider: str, api_key: str, model_name: str) -> None:\n        \"\"\"\n        Set LLM API key for a specific provider.\n\n        Args:\n            provider: Provider name (e.g., \"anthropic\")\n            api_key: API key for the specified provider\n\n        Raises:\n            ValueError: If provider or api_key format is invalid\n            RuntimeError: If encryption fails\n        \"\"\"\n        if not provider or not api_key:\n            self._encrypted_llm_key = None\n            self._llm_provider = None\n            self._llm_model_name = None\n            return\n\n        # Validate key format for the provider\n        try:\n            validated_keys = validate_llm_keys({provider: api_key})\n            validated_provider = list(validated_keys.keys())[0]\n            validated_key = list(validated_keys.values())[0]\n        except LLMKeyError as e:\n            raise LLMKeyError(f\"LLM key validation failed: {e}\")\n\n        # Encrypt only the API key using org_id (provider stored separately)\n        try:\n            self._encrypted_llm_key = self._key_encryptor.encrypt_key(validated_key)\n            self._llm_provider = validated_provider\n            self._llm_model_name = model_name\n        except Exception as e:\n            raise ZAPIError(f\"Failed to encrypt LLM key: {e}\")\n\n    def get_llm_provider(self) -> Optional[str]:\n        \"\"\"\n        Get the configured LLM provider.\n\n        Returns:\n            Provider name if configured, None otherwise\n        \"\"\"\n        return self._llm_provider\n\n    def get_llm_model_name(self) -> Optional[str]:\n        \"\"\"\n        Get the configured LLM model name.\n\n        Returns:\n  
          Model name if configured, None otherwise\n        \"\"\"\n        return self._llm_model_name\n\n    def get_encrypted_llm_key(self) -> Optional[str]:\n        \"\"\"\n        Get the encrypted LLM API key.\n\n        Returns:\n            Encrypted API key if configured, None otherwise\n        \"\"\"\n        return self._encrypted_llm_key\n\n    def get_decrypted_llm_key(self) -> Optional[str]:\n        \"\"\"\n        Get the decrypted LLM API key.\n\n        Returns:\n            Decrypted API key if configured, None otherwise\n        \"\"\"\n        try:\n            if not self._encrypted_llm_key:\n                return None\n            return self._key_encryptor.decrypt_key(self._encrypted_llm_key)\n        except Exception as e:\n            print(f\"Failed to decrypt LLM key: {e}\")\n            return None\n\n    def has_llm_key(self) -> bool:\n        \"\"\"\n        Check if LLM key is configured.\n\n        Returns:\n            True if LLM key is set, False otherwise\n        \"\"\"\n        return self._encrypted_llm_key is not None\n\n    def get_zapi_tools(self) -> list[Callable]:\n        \"\"\"\n        Get LangChain tools from ZAPI (created on-demand).\n\n        Returns:\n            List of LangChain tool functions\n        \"\"\"\n        try:\n            from .integrations.langchain.tool import ZAPILangchainTool\n\n            tool_creator = ZAPILangchainTool(self)\n            return tool_creator.create_tools()\n        except ImportError:\n            raise ImportError(\"LangChain integration not available. 
Install langchain to use this feature.\")\n\n    def launch_browser(\n        self, url: str, headless: bool = True, wait_until: str = \"load\", **playwright_options\n    ) -> BrowserSession:\n        \"\"\"\n        Launch a browser session with network logging.\n\n        Args:\n            url: Initial URL to navigate to\n            headless: Whether to run browser in headless mode (default: True)\n            wait_until: When to consider navigation complete (default: \"load\")\n                       Options: \"load\", \"domcontentloaded\", \"networkidle\"\n            **playwright_options: Additional Playwright browser launch options.\n                                 Use `args=[\"--disable-web-security\"]` to disable\n                                 web security (for testing only).\n\n        Returns:\n            BrowserSession instance ready for navigation and interaction\n\n        Raises:\n            ZAPIValidationError: If URL format is invalid\n            ZAPIError: If browser launch fails\n\n        Example:\n            >>> z = ZAPI(client_id=\"YOUR_CLIENT_ID\", secret=\"YOUR_SECRET\")\n            >>> session = z.launch_browser(url=\"https://app.example.com\")\n            >>> session.dump_logs(\"session.har\")\n            >>> session.close()\n\n            # Disable web security (for testing only):\n            >>> session = z.launch_browser(\n            ...     url=\"https://app.example.com\",\n            ...     args=[\"--disable-web-security\"]\n            ... 
)\n        \"\"\"\n        session = BrowserSession(auth_token=self.auth_token, headless=headless, **playwright_options)\n\n        # Initialize the session synchronously with enhanced error handling\n        try:\n            loop = asyncio.get_event_loop()\n        except RuntimeError:\n            loop = asyncio.new_event_loop()\n            asyncio.set_event_loop(loop)\n\n        try:\n            loop.run_until_complete(session._initialize(initial_url=url, wait_until=wait_until))\n        except Exception as e:\n            # Close session if initialization failed\n            try:\n                session.close()\n            except Exception:\n                # Ignore cleanup errors, focus on the original error\n                pass\n\n            error_message = str(e)\n\n            # Provide specific error messages for common browser issues\n            if \"Cannot navigate to invalid URL\" in error_message:\n                raise ZAPIValidationError(\n                    f\"Browser cannot navigate to URL: '{url}'. Please check the URL format and ensure it's accessible.\"\n                )\n            elif \"net::ERR_NAME_NOT_RESOLVED\" in error_message:\n                raise NetworkError(\n                    f\"Domain name could not be resolved: '{url}'. \"\n                    \"Please check the URL spelling and your internet connection.\"\n                )\n            elif \"net::ERR_CONNECTION_REFUSED\" in error_message:\n                raise NetworkError(\n                    f\"Connection refused to: '{url}'. The server may be down or the URL may be incorrect.\"\n                )\n            elif \"Timeout\" in error_message:\n                raise NetworkError(\n                    f\"Timeout while loading: '{url}'. \"\n                    \"The website took too long to respond. 
Please try again or use a different URL.\"\n                )\n            else:\n                raise ZAPIError(f\"Failed to launch browser session: {error_message}\")\n\n        return session\n\n    def upload_har(self, har_file: str):\n        \"\"\"\n        Upload a HAR file to the ZAPI API with optional encrypted LLM keys.\n\n        Args:\n            har_file: Path to the HAR file to upload\n\n        Returns:\n            Response JSON from the API\n\n        Raises:\n            ZAPIValidationError: If file validation fails\n            ZAPINetworkError: If upload fails due to network issues\n            ZAPIAuthenticationError: If authentication fails\n        \"\"\"\n        url = f\"{BASE_URL}/v1/api-discovery/upload-file\"\n\n        headers = {\"Authorization\": f\"Bearer {self.auth_token}\"}\n\n        # Prepare metadata if LLM key is configured\n        metadata = {}\n        if self.has_llm_key():\n            metadata = {\n                \"byok_encrypted_llm_key\": self._encrypted_llm_key,\n                \"byok_llm_provider\": self._llm_provider,  # Provider sent in plaintext\n                \"byok_llm_model\": self._llm_model_name,\n                \"byok_enabled\": True,\n                \"is_trial_user\": True,\n            }\n\n            if self.email:\n                metadata[\"user_email\"] = self.email\n        else:\n            metadata = {\n                \"byok_enabled\": False,\n                \"is_trial_user\": True,\n            }\n\n            if self.email:\n                metadata[\"user_email\"] = self.email\n\n        # Prepare multipart form data with enhanced error handling\n        try:\n            with open(har_file, \"rb\") as f:\n                files = {\"file\": (har_file, f, \"application/json\")}\n\n                # Add metadata as form data\n                data = {\"metadata\": json.dumps(metadata)}\n\n                response = requests.post(url, headers=headers, files=files, data=data, 
timeout=60)\n\n        except FileNotFoundError:\n            raise ZAPIValidationError(f\"HAR file not found: '{har_file}'\")\n        except PermissionError:\n            raise ZAPIValidationError(f\"Permission denied reading HAR file: '{har_file}'\")\n        except requests.exceptions.Timeout:\n            raise NetworkError(\"Upload request timed out. Please try again.\")\n        except requests.exceptions.ConnectionError:\n            raise NetworkError(\"Cannot connect to ZAPI upload service. Please check your internet connection.\")\n        except requests.exceptions.HTTPError as e:\n            if e.response.status_code == 401:\n                raise AuthError(\"Upload failed: Invalid or expired authentication token\")\n            elif e.response.status_code == 413:\n                raise ZAPIValidationError(\"HAR file is too large. Please try with a smaller session.\")\n            elif e.response.status_code == 400:\n                raise ZAPIValidationError(\"Invalid HAR file format. 
Please ensure the file was generated correctly.\")\n            else:\n                raise NetworkError(f\"Upload failed: HTTP {e.response.status_code}\")\n        except requests.exceptions.RequestException as e:\n            raise NetworkError(f\"Upload request failed: {e}\")\n\n        try:\n            response.raise_for_status()\n            print(\"file uploaded successfully\")\n            if self.has_llm_key():\n                print(f\"Included encrypted key for provider: {self.get_llm_provider()}\")\n            return response.json()\n        except requests.exceptions.HTTPError:\n            # This should be caught above, but just in case\n            raise ZAPINetworkError(f\"Upload failed with status code: {response.status_code}\")\n        except json.JSONDecodeError:\n            raise ZAPIError(\"Invalid response format from upload service\")\n\n    def get_documented_apis(self, page: int = 1, page_size: int = 10):\n        \"\"\"\n        Fetch the list of documented APIs with pagination support.\n\n        Args:\n            page: Page number to fetch (default: 1)\n            page_size: Number of items per page (default: 10)\n\n        Returns:\n            Response JSON containing the list of documented APIs\n\n        Raises:\n            requests.exceptions.RequestException: If the request fails\n        \"\"\"\n        url = f\"{BASE_URL}/v1/tools/apis\"\n        headers = {\"Authorization\": f\"Bearer {self.auth_token}\"}\n        params = {\"page\": page, \"page_size\": page_size}\n\n        response = requests.get(url, headers=headers, params=params)\n        response.raise_for_status()\n        return response.json()\n"
  },
  {
    "path": "zapi/encryption.py",
    "content": "\"\"\"Secure encryption/decryption utilities for LLM API keys.\"\"\"\n\nimport base64\nimport secrets\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\n\n\nclass LLMKeyEncryption:\n    \"\"\"Handles encryption/decryption of LLM API keys using org_id as context.\"\"\"\n\n    # Constants for encryption\n    KEY_LENGTH = 32  # 256 bits for AES-256\n    NONCE_LENGTH = 12  # 96 bits for GCM\n    SALT_LENGTH = 16  # 128 bits\n    TAG_LENGTH = 16  # 128 bits for GCM tag\n    ITERATIONS = 100000  # PBKDF2 iterations\n\n    def __init__(self, org_id: str):\n        \"\"\"\n        Initialize encryption handler with organization ID.\n\n        Args:\n            org_id: Organization ID used as encryption context\n\n        Raises:\n            ValueError: If org_id is empty or invalid\n        \"\"\"\n        if not org_id or not org_id.strip():\n            raise ValueError(\"org_id cannot be empty\")\n\n        self.org_id = org_id.strip()\n\n    def _derive_key(self, salt: bytes) -> bytes:\n        \"\"\"\n        Derive encryption key from org_id using PBKDF2.\n\n        Args:\n            salt: Random salt for key derivation\n\n        Returns:\n            Derived encryption key\n        \"\"\"\n        kdf = PBKDF2HMAC(\n            algorithm=hashes.SHA256(),\n            length=self.KEY_LENGTH,\n            salt=salt,\n            iterations=self.ITERATIONS,\n            backend=default_backend(),\n        )\n        return kdf.derive(self.org_id.encode(\"utf-8\"))\n\n    def encrypt_key(self, api_key: str) -> str:\n        \"\"\"\n        Encrypt a single LLM API key using org_id as context.\n\n        Args:\n            api_key: API key to encrypt\n\n        Returns:\n            Base64-encoded encrypted data with embedded salt and nonce\n\n       
 Raises:\n            ValueError: If encryption fails\n        \"\"\"\n        if not api_key or not api_key.strip():\n            raise ValueError(\"api_key cannot be empty\")\n\n        try:\n            # Generate random salt and nonce\n            salt = secrets.token_bytes(self.SALT_LENGTH)\n            nonce = secrets.token_bytes(self.NONCE_LENGTH)\n\n            # Derive encryption key\n            key = self._derive_key(salt)\n\n            # Only encrypt the API key itself (no provider needed)\n            plaintext = api_key.strip().encode(\"utf-8\")\n\n            # Encrypt using AES-256-GCM\n            cipher = Cipher(algorithms.AES(key), modes.GCM(nonce), backend=default_backend())\n            encryptor = cipher.encryptor()\n            ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n\n            # Package: salt + nonce + ciphertext + tag\n            encrypted_data = salt + nonce + ciphertext + encryptor.tag\n\n            # Return base64-encoded result\n            return base64.b64encode(encrypted_data).decode(\"ascii\")\n\n        except Exception as e:\n            raise ValueError(f\"Failed to encrypt LLM key: {e}\")\n        finally:\n            # Clear sensitive data from memory\n            if \"key\" in locals():\n                key = b\"\\x00\" * len(key)\n            if \"plaintext\" in locals():\n                plaintext = b\"\\x00\" * len(plaintext)\n\n    def decrypt_key(self, encrypted_data: str) -> str:\n        \"\"\"\n        Decrypt a single LLM API key from encrypted data.\n\n        Args:\n            encrypted_data: Base64-encoded encrypted data\n\n        Returns:\n            Decrypted API key string\n\n        Raises:\n            ValueError: If decryption fails or data is corrupted\n        \"\"\"\n        if not encrypted_data or not encrypted_data.strip():\n            raise ValueError(\"encrypted_data cannot be empty\")\n\n        key = None\n        plaintext = None\n\n        try:\n            # 
Decode base64 data\n            try:\n                data = base64.b64decode(encrypted_data.encode(\"ascii\"))\n            except Exception as e:\n                raise ValueError(f\"Invalid base64 encoding: {e}\")\n\n            # Validate minimum length\n            min_length = self.SALT_LENGTH + self.NONCE_LENGTH + self.TAG_LENGTH + 1\n            if len(data) < min_length:\n                raise ValueError(\"Encrypted data is too short\")\n\n            # Extract components\n            salt = data[: self.SALT_LENGTH]\n            nonce = data[self.SALT_LENGTH : self.SALT_LENGTH + self.NONCE_LENGTH]\n            tag_start = len(data) - self.TAG_LENGTH\n            ciphertext = data[self.SALT_LENGTH + self.NONCE_LENGTH : tag_start]\n            tag = data[tag_start:]\n\n            # Derive decryption key\n            key = self._derive_key(salt)\n\n            # Decrypt using AES-256-GCM\n            cipher = Cipher(algorithms.AES(key), modes.GCM(nonce, tag), backend=default_backend())\n            decryptor = cipher.decryptor()\n            plaintext = decryptor.update(ciphertext) + decryptor.finalize()\n\n            # Return decrypted API key directly\n            return plaintext.decode(\"utf-8\")\n\n        except Exception as e:\n            if \"Invalid base64\" in str(e):\n                raise\n            raise ValueError(f\"Failed to decrypt LLM key: {e}\")\n        finally:\n            # Clear sensitive data from memory\n            if key is not None:\n                key = b\"\\x00\" * len(key)\n            if plaintext is not None:\n                plaintext = b\"\\x00\" * len(plaintext)\n\n\ndef encrypt_llm_key(org_id: str, api_key: str) -> str:\n    \"\"\"\n    Convenience function to encrypt a single LLM key.\n\n    Args:\n        org_id: Organization ID for encryption context\n        api_key: API key to encrypt\n\n    Returns:\n        Base64-encoded encrypted data\n    \"\"\"\n    encryptor = LLMKeyEncryption(org_id)\n    return 
encryptor.encrypt_key(api_key)\n\n\ndef decrypt_llm_key(org_id: str, encrypted_data: str) -> str:\n    \"\"\"\n    Convenience function to decrypt a single LLM key.\n\n    Args:\n        org_id: Organization ID for decryption context\n        encrypted_data: Base64-encoded encrypted data\n\n    Returns:\n        Decrypted API key string\n    \"\"\"\n    decryptor = LLMKeyEncryption(org_id)\n    return decryptor.decrypt_key(encrypted_data)\n\n\ndef secure_compare_key(provider1: str, key1: str, provider2: str, key2: str) -> bool:\n    \"\"\"\n    Securely compare two provider-key pairs without timing attacks.\n\n    Args:\n        provider1: First provider name\n        key1: First API key\n        provider2: Second provider name\n        key2: Second API key\n\n    Returns:\n        True if both provider and key match, False otherwise\n    \"\"\"\n    # Use secrets.compare_digest for timing-safe comparison\n    provider_match = secrets.compare_digest(provider1, provider2)\n    key_match = secrets.compare_digest(key1, key2)\n\n    return provider_match and key_match\n"
  },
  {
    "path": "zapi/exceptions.py",
    "content": "\"\"\"Custom exception classes for ZAPI.\"\"\"\n\n\nclass ZAPIError(Exception):\n    \"\"\"Base exception class for ZAPI errors.\"\"\"\n\n    pass\n\n\nclass ZAPIAuthenticationError(ZAPIError):\n    \"\"\"Authentication-related errors.\"\"\"\n\n    pass\n\n\nclass ZAPIValidationError(ZAPIError):\n    \"\"\"Input validation errors.\"\"\"\n\n    pass\n\n\nclass ZAPINetworkError(ZAPIError):\n    \"\"\"Network-related errors.\"\"\"\n\n    pass\n\n\n# Internal aliases for consistency\nAuthError = ZAPIAuthenticationError\nNetworkError = ZAPINetworkError\nLLMKeyError = ZAPIValidationError\n"
  },
  {
    "path": "zapi/har_processing.py",
    "content": "\"\"\"HAR file processing and analysis module.\"\"\"\n\nimport json\nimport os\nimport re\nfrom dataclasses import dataclass\nfrom typing import Any, Optional\nfrom urllib.parse import urlparse\n\n\n@dataclass\nclass HarStats:\n    \"\"\"Statistics for a HAR file.\"\"\"\n\n    total_entries: int\n    valid_entries: int\n    skipped_entries: int\n    unique_domains: int\n    estimated_cost_usd: float\n    estimated_time_minutes: float\n    skipped_by_reason: dict[str, int]\n    domains: list[str]\n\n\nclass HarProcessingError(Exception):\n    \"\"\"Base exception for HAR processing errors.\"\"\"\n\n    pass\n\n\nclass HarProcessor:\n    \"\"\"\n    Class to preprocess and analyze HAR files.\n\n    Provides functionality to load HAR files, extract entries, and generate\n    statistics including cost and time estimates for processing.\n    \"\"\"\n\n    # Cost per entry in USD\n    COST_PER_ENTRY = 0.02\n\n    # Time per entry in minutes (24 seconds = 0.4 minutes)\n    TIME_PER_ENTRY_MINUTES = 24 / 60\n\n    # Filter patterns for static assets and non-API content\n    DENY_EXTENSIONS = re.compile(\n        r\"\\.(js|css|png|jpe?g|gif|svg|webp|ico|bmp|avif|mp4|webm|mp3|wav|woff2?|ttf|otf|map|jpf)(\\?.*)?$\",\n        re.IGNORECASE,\n    )\n\n    # MIME types to exclude\n    DENY_MIMETYPES = {\n        \"text/css\",\n        \"text/javascript\",\n        \"application/javascript\",\n        \"application/x-javascript\",\n        \"image/jpeg\",\n        \"image/png\",\n        \"image/gif\",\n        \"image/webp\",\n        \"image/svg+xml\",\n        \"image/x-icon\",\n        \"font/woff\",\n        \"font/woff2\",\n        \"font/ttf\",\n        \"font/otf\",\n        \"audio/mpeg\",\n        \"audio/wav\",\n        \"video/mp4\",\n        \"video/webm\",\n        \"application/pdf\",\n        \"application/font-woff\",\n    }\n\n    def __init__(self, har_file_path: str):\n        \"\"\"\n        Initialize HAR processor with a file path.\n\n        
Args:\n            har_file_path: Path to the HAR file to process\n\n        Raises:\n            HarProcessingError: If file doesn't exist or is not readable\n        \"\"\"\n        self.har_file_path = har_file_path\n        self.har_data = None\n        self.entries = []\n        self.skipped_entries_by_reason: dict[str, list[dict]] = {\n            \"invalid_entry_format\": [],\n            \"non_http_scheme\": [],\n            \"missing_url\": [],\n            \"parsing_error\": [],\n            \"denied_extension\": [],\n            \"denied_mime_type\": [],\n        }\n        self.skipped_counters: dict[str, int] = {\n            \"invalid_entry_format\": 0,\n            \"non_http_scheme\": 0,\n            \"missing_url\": 0,\n            \"parsing_error\": 0,\n            \"denied_extension\": 0,\n            \"denied_mime_type\": 0,\n        }\n        self.skipped_entries = 0\n        self.domains_found = set()\n\n        # Validate file exists and is readable\n        if not os.path.exists(har_file_path):\n            raise HarProcessingError(f\"HAR file not found: {har_file_path}\")\n\n        if not os.access(har_file_path, os.R_OK):\n            raise HarProcessingError(f\"HAR file is not readable: {har_file_path}\")\n\n    def load_and_process(self) -> HarStats:\n        \"\"\"\n        Load HAR file and process all entries to generate statistics.\n\n        Returns:\n            HarStats object containing comprehensive statistics\n\n        Raises:\n            HarProcessingError: If file processing fails\n        \"\"\"\n        try:\n            # Load HAR file content\n            with open(self.har_file_path, encoding=\"utf-8\", errors=\"replace\") as f:\n                har_file_content = f.read()\n\n            # Parse JSON\n            try:\n                self.har_data = json.loads(har_file_content)\n            except json.JSONDecodeError as e:\n                error_message = (\n                    \"HAR File Error: Invalid JSON 
format.\\n\\n\"\n                    f\"The file '{self.har_file_path}' could not be parsed as valid JSON.\\n\"\n                    f\"Error details: {e}\\n\\n\"\n                    \"Please check for:\\n\"\n                    \"1. File corruption during download or transfer.\\n\"\n                    \"2. Incomplete file content.\\n\"\n                    \"3. Manual edits that broke the JSON structure.\"\n                )\n                raise HarProcessingError(error_message)\n\n            # Validate HAR structure\n            if (\n                not isinstance(self.har_data, dict)\n                or \"log\" not in self.har_data\n                or \"entries\" not in self.har_data[\"log\"]\n            ):\n                error_message = (\n                    \"HAR File Error: Invalid HAR structure.\\n\\n\"\n                    f\"The file '{self.har_file_path}' does not follow the expected HAR format.\\n\"\n                    \"It must contain a `log` object with an `entries` array.\\n\\n\"\n                    \"Please ensure the file was generated by a compatible tool.\"\n                )\n                raise HarProcessingError(error_message)\n\n            entries = self.har_data[\"log\"][\"entries\"]\n            if not isinstance(entries, list):\n                raise HarProcessingError(\"HAR entries must be a list\")\n\n            # Process each entry\n            valid_entries = 0\n            for entry in entries:\n                if self._process_entry(entry):\n                    valid_entries += 1\n\n            # Generate statistics\n            total_entries = len(entries)\n\n            return HarStats(\n                total_entries=total_entries,\n                valid_entries=valid_entries,\n                skipped_entries=self.skipped_entries,\n                unique_domains=len(self.domains_found),\n                estimated_cost_usd=valid_entries * self.COST_PER_ENTRY,\n                
estimated_time_minutes=valid_entries * self.TIME_PER_ENTRY_MINUTES,\n                skipped_by_reason=dict(self.skipped_counters),\n                domains=sorted(self.domains_found),\n            )\n\n        except FileNotFoundError:\n            raise HarProcessingError(f\"HAR file not found: {self.har_file_path}\")\n        except PermissionError:\n            raise HarProcessingError(f\"Permission denied reading HAR file: {self.har_file_path}\")\n        except Exception as e:\n            raise HarProcessingError(f\"Error processing HAR file: {e}\")\n\n    def _process_entry(self, entry: dict[str, Any]) -> bool:\n        \"\"\"\n        Process a single HAR entry and extract relevant information.\n\n        Args:\n            entry: HAR entry dictionary\n\n        Returns:\n            True if entry is valid and processed, False if skipped\n        \"\"\"\n        try:\n            # Basic validation - check for required fields\n            if \"request\" not in entry or \"response\" not in entry:\n                self.skipped_entries_by_reason[\"invalid_entry_format\"].append(entry)\n                self.skipped_counters[\"invalid_entry_format\"] += 1\n                self.skipped_entries += 1\n                return False\n\n            # Extract URL\n            url = self._extract_url_from_entry(entry)\n            if not url:\n                self.skipped_entries_by_reason[\"missing_url\"].append(entry)\n                self.skipped_counters[\"missing_url\"] += 1\n                self.skipped_entries += 1\n                return False\n\n            # Validate HTTP/HTTPS scheme\n            if not url.lower().startswith((\"http://\", \"https://\")):\n                self.skipped_entries_by_reason[\"non_http_scheme\"].append(entry)\n                self.skipped_counters[\"non_http_scheme\"] += 1\n                self.skipped_entries += 1\n                return False\n\n            # Filter by file extensions - exclude static assets\n            try:\n   
             parsed_url = urlparse(url)\n                path = parsed_url.path\n                if self.DENY_EXTENSIONS.search(path):\n                    self.skipped_entries_by_reason[\"denied_extension\"].append(entry)\n                    self.skipped_counters[\"denied_extension\"] += 1\n                    self.skipped_entries += 1\n                    return False\n            except Exception:\n                # URL parsing failed, but we'll continue processing\n                pass\n\n            # Filter by response MIME types\n            response_content = self._extract_response_content(entry)\n            mime_type = response_content.get(\"mimeType\", \"\").split(\";\")[0]\n            if mime_type in self.DENY_MIMETYPES:\n                self.skipped_entries_by_reason[\"denied_mime_type\"].append(entry)\n                self.skipped_counters[\"denied_mime_type\"] += 1\n                self.skipped_entries += 1\n                return False\n\n            # Extract domain information\n            try:\n                parsed_url = urlparse(url)\n                domain = parsed_url.netloc\n                if domain:\n                    self.domains_found.add(domain)\n            except Exception:\n                # URL parsing failed, but we'll still count it as valid\n                pass\n\n            # Store processed entry\n            self.entries.append(entry)\n            return True\n\n        except Exception:\n            self.skipped_entries_by_reason[\"parsing_error\"].append(entry)\n            self.skipped_counters[\"parsing_error\"] += 1\n            self.skipped_entries += 1\n            return False\n\n    def _extract_url_from_entry(self, entry: dict[str, Any]) -> str:\n        \"\"\"Extract URL from an entry efficiently, returning empty string if not found.\"\"\"\n        try:\n            return entry.get(\"request\", {}).get(\"url\", \"\")\n        except (KeyError, AttributeError):\n            return \"\"\n\n    def 
_extract_response_content(self, entry: dict[str, Any]) -> dict[str, Any]:\n        \"\"\"Extract response content from an entry efficiently, returning empty dict if not found.\"\"\"\n        try:\n            return entry.get(\"response\", {}).get(\"content\", {})\n        except (KeyError, AttributeError):\n            return {}\n\n    def save_filtered_har(self, output_path: str) -> str:\n        \"\"\"\n        Save a new HAR file containing only the valid API-relevant entries.\n\n        Args:\n            output_path: Path where to save the filtered HAR file\n\n        Returns:\n            Path to the saved filtered HAR file\n\n        Raises:\n            HarProcessingError: If saving fails or no data has been processed\n        \"\"\"\n        if self.har_data is None:\n            raise HarProcessingError(\"No HAR data loaded. Call load_and_process() first.\")\n\n        if not self.entries:\n            raise HarProcessingError(\"No valid entries found to save.\")\n\n        try:\n            # Create a copy of the original HAR structure\n            filtered_har = {\n                \"log\": {\n                    \"version\": self.har_data[\"log\"].get(\"version\", \"1.2\"),\n                    \"creator\": self.har_data[\"log\"].get(\"creator\", {\"name\": \"ZAPI HarProcessor\", \"version\": \"1.0.0\"}),\n                    \"browser\": self.har_data[\"log\"].get(\"browser\", {}),\n                    \"pages\": self.har_data[\"log\"].get(\"pages\", []),\n                    \"entries\": self.entries,  # Only include the filtered valid entries\n                }\n            }\n\n            # Add metadata about filtering\n            if \"creator\" not in filtered_har[\"log\"]:\n                filtered_har[\"log\"][\"creator\"] = {}\n\n            filtered_har[\"log\"][\"creator\"][\"name\"] = \"ZAPI HarProcessor (Filtered)\"\n            filtered_har[\"log\"][\"creator\"][\"comment\"] = (\n                f\"Filtered HAR file - {len(self.entries)} 
API entries from {len(self.har_data['log']['entries'])} total entries\"\n            )\n\n            # Save to file\n            with open(output_path, \"w\", encoding=\"utf-8\") as f:\n                json.dump(filtered_har, f, indent=2, ensure_ascii=False)\n\n            return output_path\n\n        except OSError as e:\n            raise HarProcessingError(f\"Failed to save filtered HAR file: {e}\")\n        except Exception as e:\n            raise HarProcessingError(f\"Error creating filtered HAR file: {e}\")\n\n    def get_summary_report(self, stats: HarStats) -> str:\n        \"\"\"\n        Generate a formatted summary report of the HAR analysis.\n\n        Args:\n            stats: HarStats object from load_and_process()\n\n        Returns:\n            Formatted string report\n        \"\"\"\n        report_lines = [\n            \"📊 HAR File Analysis Summary\",\n            \"=\" * 50,\n            f\"📁 File: {os.path.basename(self.har_file_path)}\",\n            f\"📋 Total Entries: {stats.total_entries:,}\",\n            f\"✅ Valid Entries: {stats.valid_entries:,}\",\n            f\"⚠️  Skipped Entries: {stats.skipped_entries:,}\",\n            f\"🌐 Unique Domains: {stats.unique_domains:,}\",\n            \"\",\n            \"💰 Cost Analysis (API entries only):\",\n            f\"   • Rate: ${self.COST_PER_ENTRY:.3f} per API entry\",\n            f\"   • Estimated Cost: ${stats.estimated_cost_usd:.2f}\",\n            \"\",\n            \"⏱️  Time Estimate (API entries only):\",\n            f\"   • Rate: {self.TIME_PER_ENTRY_MINUTES:.2f} minutes per API entry\",\n            f\"   • Estimated Time: {stats.estimated_time_minutes:.1f} minutes\",\n            f\"   • Estimated Time: {stats.estimated_time_minutes / 60:.1f} hours\",\n        ]\n\n        # Add skipped entry breakdown if there are any\n        if stats.skipped_entries > 0:\n            report_lines.extend([\"\", \"⚠️  Skipped Entry Breakdown:\"])\n            for reason, count in 
stats.skipped_by_reason.items():\n                if count > 0:\n                    reason_display = reason.replace(\"_\", \" \").title()\n                    report_lines.append(f\"   • {reason_display}: {count:,}\")\n\n        # Add top domains if there are any\n        if stats.domains:\n            report_lines.extend([\"\", \"🌐 Top Domains Found:\"])\n            # Show first 10 domains\n            for domain in stats.domains[:10]:\n                report_lines.append(f\"   • {domain}\")\n\n            if len(stats.domains) > 10:\n                report_lines.append(f\"   • ... and {len(stats.domains) - 10} more\")\n\n        return \"\\n\".join(report_lines)\n\n\ndef analyze_har_file(\n    har_file_path: str, save_filtered: bool = False, filtered_output_path: str = None\n) -> tuple[HarStats, str, Optional[str]]:\n    \"\"\"\n    Convenience function to analyze a HAR file and optionally save filtered version.\n\n    Args:\n        har_file_path: Path to the HAR file\n        save_filtered: Whether to save a filtered HAR file with only API entries\n        filtered_output_path: Path for filtered HAR file (auto-generated if None)\n\n    Returns:\n        Tuple of (HarStats, formatted_report_string, filtered_file_path_or_none)\n\n    Raises:\n        HarProcessingError: If processing fails\n    \"\"\"\n    processor = HarProcessor(har_file_path)\n    stats = processor.load_and_process()\n    report = processor.get_summary_report(stats)\n\n    filtered_file_path = None\n    if save_filtered and stats.valid_entries > 0:\n        if filtered_output_path is None:\n            # Auto-generate filtered file name\n            base_name = os.path.splitext(har_file_path)[0]\n            filtered_output_path = f\"{base_name}_filtered.har\"\n\n        filtered_file_path = processor.save_filtered_har(filtered_output_path)\n\n    return stats, report, filtered_file_path\n"
  },
  {
    "path": "zapi/integrations/langchain/tool.py",
    "content": "\"\"\"\nZAPI Langchain Tool - Simple & Clean\n\nBasic conversion of ZAPI documented APIs into Langchain tools.\n\"\"\"\n\nimport os\nfrom typing import Any, Callable, Optional\n\nimport requests\nfrom langchain_core.tools import tool\n\nfrom ...core import ZAPI\nfrom ...utils import load_security_headers\n\n\nclass ZAPILangchainTool:\n    \"\"\"\n    Simple tool provider to convert ZAPI APIs into Langchain tools.\n\n    Supports loading security headers from a JSON file for API authentication.\n    The headers file should contain a 'headers' object with key-value pairs\n    that will be added to all API requests.\n\n    Example headers file (api-headers.json):\n    {\n        \"headers\": {\n            \"Authorization\": \"Bearer your-token\",\n            \"X-API-Key\": \"your-api-key\",\n            \"X-Client-ID\": \"your-client-id\"\n        }\n    }\n    \"\"\"\n\n    def __init__(self, zapi_instance: ZAPI, headers_file: Optional[str] = None):\n        self.zapi = zapi_instance\n        self.security_headers = load_security_headers(headers_file)\n\n    def create_tools(self) -> list[Callable]:\n        \"\"\"Create Langchain tools from documented APIs.\"\"\"\n        # Get APIs from ZAPI\n        response = self.zapi.get_documented_apis(page_size=50)\n        apis = response.get(\"items\", [])\n\n        # Create tools\n        tools = []\n        for api_data in apis:\n            try:\n                tool_func = self._create_tool(api_data)\n                tools.append(tool_func)\n            except Exception as e:\n                print(f\"Error creating tool: {e}\")\n                continue  # Skip failed tools\n\n        return tools\n\n    def _create_tool(self, api_data: dict[str, Any]) -> Callable:\n        \"\"\"Create a tool from API data.\"\"\"\n        api_id = api_data.get(\"id\", \"\")\n        api_name = api_data.get(\"title\", f\"api_{api_id}\")\n        description = api_data.get(\"description\", f\"{api_data.get('api_type', 
'GET')} {api_data.get('path', '/')}\")\n\n        @tool(description=description)\n        def api_tool(**kwargs) -> dict[str, Any]:\n            \"\"\"Dynamically created ZAPI tool for API calls.\"\"\"\n            return self._call_api(api_id, api_data, kwargs)\n\n        # Set the tool name (clean it for use as function name)\n        clean_name = api_name.lower().replace(\" \", \"_\").replace(\"-\", \"_\").replace(\"/\", \"_\")\n        # Remove any non-alphanumeric characters except underscores\n        clean_name = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in clean_name)\n        # Ensure it starts with a letter or underscore\n        if clean_name and not (clean_name[0].isalpha() or clean_name[0] == \"_\"):\n            clean_name = f\"api_{clean_name}\"\n\n        api_tool.name = clean_name or f\"api_{api_id}\"\n\n        return api_tool\n\n    def _call_api(self, api_id: str, api_data: dict[str, Any], params: dict[str, Any]) -> dict[str, Any]:\n        \"\"\"Make the actual API call with comprehensive error handling.\"\"\"\n        import logging\n\n        method = api_data.get(\"api_type\", \"GET\")  # Use 'api_type' instead of 'method'\n        path = api_data.get(\"path\", \"/\")\n        base_url = api_data.get(\"base_url\", \"\") or os.getenv(\"YOUR_API_BASE_URL\", \"\")\n\n        # Validate base_url\n        if not base_url:\n            return {\n                \"error\": True,\n                \"error_type\": \"configuration_error\",\n                \"message\": \"No base URL configured for API call\",\n                \"details\": \"Either set base_url in API configuration or YOUR_API_BASE_URL environment variable\",\n                \"api_id\": api_id,\n                \"path\": path,\n            }\n\n        # Build URL\n        url = f\"{base_url.rstrip('/')}{path}\"\n\n        # Replace path parameters\n        for key, value in params.items():\n            url = url.replace(f\"{{{key}}}\", str(value))\n\n        # Prepare 
request\n        headers = {}\n        data = None\n\n        # Add security headers from loaded configuration\n        headers.update(self.security_headers)\n\n        # Set data for POST/PUT\n        if method.upper() in [\"POST\", \"PUT\"]:\n            data = {k: v for k, v in params.items() if f\"{{{k}}}\" not in api_data.get(\"path\", \"\")}\n\n        # Log request details\n        logging.info(f\"API Call - {method.upper()} {url}\")\n        if data:\n            logging.debug(f\"Request data: {data}\")\n\n        # Make request\n        response = None\n        try:\n            response = requests.request(\n                method=method, url=url, headers=headers, json=data if data else None, timeout=30\n            )\n\n            # Log response details\n            logging.info(f\"API Response - Status: {response.status_code}\")\n\n            # Handle successful responses (2xx)\n            if 200 <= response.status_code < 300:\n                try:\n                    return response.json() if response.content else {\"status\": \"success\"}\n                except ValueError as e:\n                    # JSON parsing failed but status was successful\n                    logging.warning(f\"JSON parsing failed for successful response: {str(e)}\")\n                    return {\n                        \"status\": \"success\",\n                        \"raw_response\": response.text,\n                        \"content_type\": response.headers.get(\"content-type\", \"unknown\"),\n                        \"warning\": f\"Response not valid JSON: {str(e)}\",\n                    }\n\n            # Handle client errors (4xx) and server errors (5xx)\n            else:\n                error_response = {\n                    \"error\": True,\n                    \"status_code\": response.status_code,\n                    \"status_text\": response.reason,\n                    \"url\": url,\n                    \"method\": method.upper(),\n                }\n\n    
            # Try to get JSON error response\n                try:\n                    error_response[\"response\"] = response.json()\n                except ValueError:\n                    # Not JSON, capture raw text\n                    error_response[\"raw_response\"] = response.text\n\n                # Add response headers that might be useful\n                useful_headers = [\"content-type\", \"www-authenticate\", \"retry-after\", \"x-ratelimit-remaining\"]\n                response_headers = {k: v for k, v in response.headers.items() if k.lower() in useful_headers}\n                if response_headers:\n                    error_response[\"headers\"] = response_headers\n\n                logging.error(f\"API Error - {response.status_code}: {error_response}\")\n                return error_response\n\n        except requests.exceptions.Timeout as e:\n            error_response = {\n                \"error\": True,\n                \"error_type\": \"timeout\",\n                \"message\": \"Request timed out after 30 seconds\",\n                \"url\": url,\n                \"method\": method.upper(),\n                \"details\": str(e),\n            }\n            logging.error(f\"API Timeout: {error_response}\")\n            return error_response\n\n        except requests.exceptions.ConnectionError as e:\n            error_response = {\n                \"error\": True,\n                \"error_type\": \"connection_error\",\n                \"message\": \"Failed to connect to the API endpoint\",\n                \"url\": url,\n                \"method\": method.upper(),\n                \"details\": str(e),\n            }\n            logging.error(f\"API Connection Error: {error_response}\")\n            return error_response\n\n        except requests.exceptions.HTTPError as e:\n            error_response = {\n                \"error\": True,\n                \"error_type\": \"http_error\",\n                \"message\": \"HTTP error occurred\",\n   
             \"url\": url,\n                \"method\": method.upper(),\n                \"details\": str(e),\n            }\n            if response:\n                error_response[\"status_code\"] = response.status_code\n                error_response[\"status_text\"] = response.reason\n            logging.error(f\"API HTTP Error: {error_response}\")\n            return error_response\n\n        except requests.exceptions.RequestException as e:\n            error_response = {\n                \"error\": True,\n                \"error_type\": \"request_error\",\n                \"message\": \"Request failed\",\n                \"url\": url,\n                \"method\": method.upper(),\n                \"details\": str(e),\n            }\n            logging.error(f\"API Request Error: {error_response}\")\n            return error_response\n\n        except Exception as e:\n            error_response = {\n                \"error\": True,\n                \"error_type\": \"unexpected_error\",\n                \"message\": \"An unexpected error occurred\",\n                \"url\": url,\n                \"method\": method.upper(),\n                \"details\": str(e),\n                \"exception_type\": type(e).__name__,\n            }\n            logging.error(f\"API Unexpected Error: {error_response}\")\n            return error_response\n"
  },
  {
    "path": "zapi/providers.py",
    "content": "\"\"\"LLM Provider enums and validation utilities.\n\nZAPI supports a generic key-value approach for LLM API keys, allowing developers\nto bring their own keys for any provider. We support 4 main providers with\nfull validation and optimized integration.\n\nCurrently supported providers:\n- Anthropic, OpenAI, Google, Groq (main supported providers)\n\"\"\"\n\nfrom enum import Enum\n\nfrom .exceptions import LLMKeyError\n\n\nclass LLMProvider(Enum):\n    \"\"\"\n    Supported LLM providers for API key management.\n\n    ZAPI supports 4 main LLM providers with optimized integration and validation.\n    Each provider has specific API key format validation.\n    \"\"\"\n\n    # Main supported providers\n    ANTHROPIC = \"anthropic\"\n    OPENAI = \"openai\"\n    GOOGLE = \"google\"\n    GROQ = \"groq\"\n\n    @classmethod\n    def get_all_providers(cls) -> set[str]:\n        \"\"\"Get all supported provider names.\"\"\"\n        return {provider.value for provider in cls}\n\n    @classmethod\n    def is_valid_provider(cls, provider: str) -> bool:\n        \"\"\"Check if a provider name is valid.\"\"\"\n        return provider.lower() in cls.get_all_providers()\n\n\ndef validate_llm_keys(llm_keys: dict[str, str]) -> dict[str, str]:\n    \"\"\"\n    Validate LLM keys dictionary for supported providers.\n\n    Supports the 4 main LLM providers with specific validation for each.\n\n    Args:\n        llm_keys: Dictionary mapping provider names to API keys\n                 Example: {\"anthropic\": \"sk-ant-...\", \"openai\": \"sk-...\", \"groq\": \"gsk_...\"}\n\n    Returns:\n        Validated and normalized keys dictionary\n\n    Raises:\n        LLMKeyError: If keys format is invalid or providers are unsupported\n    \"\"\"\n    if not isinstance(llm_keys, dict):\n        raise LLMKeyError(\"llm_keys must be a dictionary\")\n\n    if not llm_keys:\n        raise LLMKeyError(\"llm_keys cannot be empty\")\n\n    validated_keys = {}\n\n    
supported_providers = \", \".join(LLMProvider.get_all_providers())\n\n    for provider, api_key in llm_keys.items():\n        # Normalize provider name to lowercase\n        provider_normalized = provider.lower()\n\n        # Validate provider is supported\n        if not LLMProvider.is_valid_provider(provider_normalized):\n            raise LLMKeyError(f\"Unsupported LLM provider: '{provider}'. Supported providers: {supported_providers}\")\n\n        # Validate API key format\n        if not isinstance(api_key, str) or not api_key.strip():\n            raise LLMKeyError(f\"API key for provider '{provider}' must be a non-empty string\")\n\n        _validate_key_format(provider_normalized, api_key.strip())\n\n        validated_keys[provider_normalized] = api_key.strip()\n\n    return validated_keys\n\n\ndef _validate_key_format(provider: str, api_key: str) -> None:\n    \"\"\"\n    Validate API key format for specific providers.\n\n    All 4 main providers receive specific validation tailored to their API key formats.\n\n    Args:\n        provider: Provider name (normalized to lowercase)\n        api_key: API key to validate\n\n    Raises:\n        LLMKeyError: If key format is invalid for the provider\n    \"\"\"\n    # Main supported providers - specific validation for each\n    if provider == LLMProvider.ANTHROPIC.value:\n        if not api_key.startswith(\"sk-ant-\"):\n            raise LLMKeyError(\"Anthropic API keys must start with 'sk-ant-'\")\n        if len(api_key) < 20:\n            raise LLMKeyError(\"Anthropic API keys must be at least 20 characters long\")\n\n    elif provider == LLMProvider.OPENAI.value:\n        if not api_key.startswith(\"sk-\"):\n            raise LLMKeyError(\"OpenAI API keys must start with 'sk-'\")\n        if len(api_key) < 20:\n            raise LLMKeyError(\"OpenAI API keys must be at least 20 characters long\")\n\n    elif provider == LLMProvider.GOOGLE.value:\n        # Google API keys are typically 39 characters and 
alphanumeric + hyphens\n        if len(api_key) < 20:\n            raise LLMKeyError(\"Google API keys must be at least 20 characters long\")\n\n    elif provider == LLMProvider.GROQ.value:\n        if not api_key.startswith(\"gsk_\"):\n            raise LLMKeyError(\"Groq API keys must start with 'gsk_'\")\n        if len(api_key) < 20:\n            raise LLMKeyError(\"Groq API keys must be at least 20 characters long\")\n\n    # Generic validation for all providers\n    if len(api_key) < 10:\n        raise LLMKeyError(f\"API key for {provider} is too short (minimum 10 characters)\")\n\n    # Additional validation: ensure key contains only valid characters\n    if not api_key.replace(\"-\", \"\").replace(\"_\", \"\").replace(\".\", \"\").isalnum():\n        raise LLMKeyError(f\"API key for {provider} contains invalid characters\")\n\n\ndef get_provider_display_name(provider: str) -> str:\n    \"\"\"\n    Get human-readable display name for provider.\n\n    Returns display names for the 4 main supported providers.\n\n    Args:\n        provider: Provider name (normalized)\n\n    Returns:\n        Display name for the provider\n    \"\"\"\n    display_names = {\n        # Main supported providers\n        LLMProvider.ANTHROPIC.value: \"Anthropic\",\n        LLMProvider.OPENAI.value: \"OpenAI\",\n        LLMProvider.GOOGLE.value: \"Google\",\n        LLMProvider.GROQ.value: \"Groq\",\n    }\n    return display_names.get(provider, provider.title())\n\n\ndef is_primary_provider(provider: str) -> bool:\n    \"\"\"\n    Check if provider is the primary supported provider.\n\n    Args:\n        provider: Provider name (normalized)\n\n    Returns:\n        True if provider is primary supported (Anthropic), False otherwise\n    \"\"\"\n    return provider.lower() == LLMProvider.ANTHROPIC.value\n\n\ndef get_supported_providers_info() -> dict[str, dict[str, str]]:\n    \"\"\"\n    Get information about the 4 main supported providers.\n\n    Returns:\n        Dictionary with 
provider info including support level\n    \"\"\"\n    return {\n        \"anthropic\": {\n            \"display_name\": \"Anthropic\",\n            \"support_level\": \"primary\",\n            \"description\": \"Primary supported provider with complete validation\",\n        },\n        \"openai\": {\n            \"display_name\": \"OpenAI\",\n            \"support_level\": \"main\",\n            \"description\": \"Fully supported with complete validation\",\n        },\n        \"google\": {\n            \"display_name\": \"Google\",\n            \"support_level\": \"main\",\n            \"description\": \"Fully supported with complete validation\",\n        },\n        \"groq\": {\n            \"display_name\": \"Groq\",\n            \"support_level\": \"main\",\n            \"description\": \"Fully supported with complete validation\",\n        },\n    }\n"
  },
  {
    "path": "zapi/session.py",
    "content": "\"\"\"BrowserSession implementation with Playwright integration.\"\"\"\n\nimport asyncio\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nfrom playwright.async_api import (\n    Browser,\n    BrowserContext,\n    Page,\n    Playwright,\n    async_playwright,\n)\nfrom playwright.async_api import (\n    Error as PlaywrightError,\n)\nfrom playwright.async_api import (\n    TimeoutError as PlaywrightTimeoutError,\n)\n\n\ndef _run_async(coro):\n    \"\"\"Helper to run async coroutines synchronously.\"\"\"\n    try:\n        loop = asyncio.get_event_loop()\n    except RuntimeError:\n        loop = asyncio.new_event_loop()\n        asyncio.set_event_loop(loop)\n\n    if loop.is_running():\n        # If we're already in an async context, just return the coroutine\n        return coro\n    else:\n        # Run synchronously\n        return loop.run_until_complete(coro)\n\n\nclass BrowserSessionError(Exception):\n    \"\"\"Base exception for browser session errors.\"\"\"\n\n    pass\n\n\nclass BrowserNavigationError(BrowserSessionError):\n    \"\"\"Navigation-related browser errors.\"\"\"\n\n    pass\n\n\nclass BrowserInitializationError(BrowserSessionError):\n    \"\"\"Browser initialization errors.\"\"\"\n\n    pass\n\n\nclass BrowserSession:\n    \"\"\"\n    Manages a Playwright browser session with HAR recording and network log capture.\n\n    This class handles browser lifecycle, authentication injection, navigation,\n    and HAR file export for API discovery.\n    \"\"\"\n\n    def __init__(self, auth_token: str, headless: bool = True, **playwright_options):\n        \"\"\"\n        Initialize a browser session.\n\n        Args:\n            auth_token: Authentication token to inject via Authorization header\n            headless: Whether to run browser in headless mode\n            **playwright_options: Additional options for Playwright browser launch\n        \"\"\"\n        self.auth_token = auth_token\n        self.headless = 
headless\n        self.playwright_options = playwright_options\n\n        self._playwright: Optional[Playwright] = None\n        self._browser: Optional[Browser] = None\n        self._context: Optional[BrowserContext] = None\n        self._page: Optional[Page] = None\n        self._har_path: Optional[Path] = None\n\n    async def _initialize(self, initial_url: Optional[str] = None, wait_until: str = \"load\"):\n        \"\"\"\n        Initialize Playwright browser, context, and page.\n\n        Args:\n            initial_url: Optional initial URL to navigate to\n            wait_until: When to consider navigation complete (default: \"load\")\n\n        Raises:\n            BrowserInitializationError: If browser initialization fails\n            BrowserNavigationError: If initial navigation fails\n        \"\"\"\n        try:\n            # Start Playwright\n            self._playwright = await async_playwright().start()\n\n            # Launch browser with enhanced error handling\n            try:\n                # Add stealth args if not present\n                launch_options = self.playwright_options.copy()\n                args = launch_options.get(\"args\", [])\n                if \"--disable-blink-features=AutomationControlled\" not in args:\n                    args.append(\"--disable-blink-features=AutomationControlled\")\n                launch_options[\"args\"] = args\n\n                # Default to a more realistic viewport if not specified\n                if \"viewport\" not in launch_options:\n                    # None means resize to window size\n                    pass\n\n                self._browser = await self._playwright.chromium.launch(headless=self.headless, **launch_options)\n            except Exception as e:\n                raise BrowserInitializationError(\n                    f\"Failed to launch browser: {str(e)}. 
\"\n                    \"This may be due to missing browser dependencies or system restrictions.\"\n                )\n\n            # Create temporary HAR file path\n            import tempfile\n\n            self._har_path = Path(tempfile.mktemp(suffix=\".har\"))\n\n            # Create context with HAR recording\n            try:\n                # Use a realistic User-Agent\n                user_agent = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36\"\n\n                self._context = await self._browser.new_context(\n                    record_har_path=str(self._har_path),\n                    record_har_mode=\"minimal\",\n                    user_agent=user_agent,\n                    viewport={\"width\": 1280, \"height\": 720},  # Set a standard viewport\n                    device_scale_factor=2,\n                    locale=\"en-US\",\n                    timezone_id=\"America/New_York\",\n                )\n\n                # Add stealth scripts to evade bot detection\n                await self._context.add_init_script(\"\"\"\n                    Object.defineProperty(navigator, 'webdriver', {\n                        get: () => undefined\n                    });\n\n                    // Pass the Chrome Test\n                    window.navigator.chrome = {\n                        runtime: {},\n                    };\n\n                    // Pass the Plugins Length Test\n                    Object.defineProperty(navigator, 'plugins', {\n                        get: () => [1, 2, 3, 4, 5],\n                    });\n\n                    // Pass the Languages Test\n                    Object.defineProperty(navigator, 'languages', {\n                        get: () => ['en-US', 'en'],\n                    });\n                \"\"\")\n            except Exception as e:\n                raise BrowserInitializationError(f\"Failed to create browser context: {str(e)}\")\n\n              
  pass\n                # Original auth injection code removed to prevent CORS issues on public sites\n                # auth_handler = get_auth_handler(\"header\")\n                # await auth_handler(self._context, self.auth_token)\n\n            # Create page\n            try:\n                self._page = await self._context.new_page()\n            except Exception as e:\n                raise BrowserInitializationError(f\"Failed to create browser page: {str(e)}\")\n\n            # Navigate to initial URL if provided\n            if initial_url:\n                await self._navigate_async(initial_url, wait_until=wait_until)\n\n        except (BrowserInitializationError, BrowserNavigationError):\n            # Re-raise our custom exceptions\n            raise\n        except Exception as e:\n            # Catch any other unexpected errors\n            raise BrowserInitializationError(f\"Unexpected error during browser initialization: {str(e)}\")\n\n    async def _navigate_async(self, url: str, wait_until: str = \"load\") -> None:\n        \"\"\"\n        Internal async navigate method with enhanced error handling.\n\n        Args:\n            url: URL to navigate to\n            wait_until: When to consider navigation complete\n                       (\"load\", \"domcontentloaded\", \"networkidle\")\n\n        Raises:\n            BrowserNavigationError: If navigation fails\n        \"\"\"\n        if not self._page:\n            raise BrowserSessionError(\"Browser session not initialized. Call _initialize() first.\")\n\n        try:\n            # Navigate with Authorization header already set\n            await self._page.goto(url, wait_until=wait_until, timeout=30000)  # 30 second timeout\n\n        except PlaywrightTimeoutError:\n            raise BrowserNavigationError(\n                f\"Navigation timeout: '{url}' took too long to load. \"\n                \"The website may be slow or unresponsive. 
Try again or use a different URL.\"\n            )\n        except PlaywrightError as e:\n            error_message = str(e)\n\n            if \"Cannot navigate to invalid URL\" in error_message:\n                raise BrowserNavigationError(\n                    f\"Invalid URL format: '{url}'. \"\n                    \"Please ensure the URL is properly formatted (e.g., 'https://example.com').\"\n                )\n            elif \"net::ERR_NAME_NOT_RESOLVED\" in error_message:\n                raise BrowserNavigationError(\n                    f\"Domain name could not be resolved: '{url}'. \"\n                    \"Please check the URL spelling and your internet connection.\"\n                )\n            elif \"net::ERR_CONNECTION_REFUSED\" in error_message:\n                raise BrowserNavigationError(\n                    f\"Connection refused: '{url}'. The server may be down or the URL may be incorrect.\"\n                )\n            elif \"net::ERR_CONNECTION_TIMED_OUT\" in error_message:\n                raise BrowserNavigationError(\n                    f\"Connection timed out: '{url}'. The server took too long to respond. Please try again.\"\n                )\n            elif \"net::ERR_INTERNET_DISCONNECTED\" in error_message:\n                raise BrowserNavigationError(\"No internet connection detected. Please check your network connection.\")\n            elif \"net::ERR_CERT_AUTHORITY_INVALID\" in error_message:\n                raise BrowserNavigationError(\n                    f\"SSL certificate error for: '{url}'. 
The website's security certificate is invalid or expired.\"\n                )\n            else:\n                raise BrowserNavigationError(f\"Navigation failed for '{url}': {error_message}\")\n        except Exception as e:\n            raise BrowserNavigationError(f\"Unexpected navigation error for '{url}': {str(e)}\")\n\n    def navigate(self, url: str, wait_until: str = \"load\") -> None:\n        \"\"\"\n        Navigate to a URL with authentication injection.\n\n        Args:\n            url: URL to navigate to\n            wait_until: When to consider navigation complete\n                       (\"load\", \"domcontentloaded\", \"networkidle\")\n\n        Raises:\n            BrowserNavigationError: If navigation fails\n        \"\"\"\n        _run_async(self._navigate_async(url, wait_until))\n\n    async def _click_async(self, selector: str, **kwargs) -> None:\n        \"\"\"\n        Internal async click method with error handling.\n\n        Raises:\n            BrowserSessionError: If click operation fails\n        \"\"\"\n        if not self._page:\n            raise BrowserSessionError(\"Browser session not initialized.\")\n\n        try:\n            await self._page.click(selector, **kwargs)\n        except PlaywrightTimeoutError:\n            raise BrowserSessionError(\n                f\"Element not found or not clickable: '{selector}'. 
\"\n                \"Please check the selector or wait for the page to load completely.\"\n            )\n        except PlaywrightError as e:\n            raise BrowserSessionError(f\"Click failed for selector '{selector}': {str(e)}\")\n\n    def click(self, selector: str, **kwargs) -> None:\n        \"\"\"\n        Click an element by selector.\n\n        Args:\n            selector: CSS selector for the element\n            **kwargs: Additional options for Playwright click\n        \"\"\"\n        _run_async(self._click_async(selector, **kwargs))\n\n    async def _fill_async(self, selector: str, value: str, **kwargs) -> None:\n        \"\"\"\n        Internal async fill method with error handling.\n\n        Raises:\n            BrowserSessionError: If fill operation fails\n        \"\"\"\n        if not self._page:\n            raise BrowserSessionError(\"Browser session not initialized.\")\n\n        try:\n            await self._page.fill(selector, value, **kwargs)\n        except PlaywrightTimeoutError:\n            raise BrowserSessionError(\n                f\"Input element not found: '{selector}'. 
\"\n                \"Please check the selector or wait for the page to load completely.\"\n            )\n        except PlaywrightError as e:\n            raise BrowserSessionError(f\"Fill failed for selector '{selector}': {str(e)}\")\n\n    def fill(self, selector: str, value: str, **kwargs) -> None:\n        \"\"\"\n        Fill a form field.\n\n        Args:\n            selector: CSS selector for the input element\n            value: Value to fill\n            **kwargs: Additional options for Playwright fill\n        \"\"\"\n        _run_async(self._fill_async(selector, value, **kwargs))\n\n    async def _wait_for_async(self, selector: Optional[str] = None, timeout: Optional[float] = None) -> None:\n        \"\"\"\n        Internal async wait_for method with error handling.\n\n        Raises:\n            BrowserSessionError: If wait operation fails\n        \"\"\"\n        if not self._page:\n            raise BrowserSessionError(\"Browser session not initialized.\")\n\n        if selector:\n            try:\n                await self._page.wait_for_selector(selector, timeout=timeout)\n            except PlaywrightTimeoutError:\n                raise BrowserSessionError(\n                    f\"Element not found within timeout: '{selector}'. 
\"\n                    \"The element may not exist or may take longer to appear.\"\n                )\n            except PlaywrightError as e:\n                raise BrowserSessionError(f\"Wait failed for selector '{selector}': {str(e)}\")\n        elif timeout:\n            try:\n                await self._page.wait_for_timeout(timeout)\n            except Exception as e:\n                raise BrowserSessionError(f\"Wait timeout failed: {str(e)}\")\n        else:\n            raise BrowserSessionError(\"Must provide either selector or timeout\")\n\n    def wait_for(self, selector: Optional[str] = None, timeout: Optional[float] = None) -> None:\n        \"\"\"\n        Wait for a selector or timeout.\n\n        Args:\n            selector: CSS selector to wait for (if None, waits for timeout)\n            timeout: Timeout in milliseconds\n        \"\"\"\n        _run_async(self._wait_for_async(selector, timeout))\n\n    async def _dump_logs_async(self, filepath: Union[str, Path]) -> None:\n        \"\"\"\n        Internal async dump_logs method with error handling.\n\n        Raises:\n            BrowserSessionError: If log dumping fails\n        \"\"\"\n        if not self._context:\n            raise BrowserSessionError(\"Browser session not initialized.\")\n\n        try:\n            # Close context to finalize HAR recording\n            await self._context.close()\n        except Exception as e:\n            raise BrowserSessionError(f\"Failed to close browser context: {str(e)}\")\n\n        # Copy HAR file to destination with enhanced error handling\n        try:\n            if self._har_path and self._har_path.exists():\n                import shutil\n\n                # Ensure destination directory exists\n                dest_path = Path(filepath)\n                dest_path.parent.mkdir(parents=True, exist_ok=True)\n\n                shutil.copy(self._har_path, filepath)\n\n                # Verify the copy was successful\n                if not 
dest_path.exists():\n                    raise BrowserSessionError(f\"Failed to create HAR file at: '{filepath}'\")\n\n                # Provide immediate feedback about HAR size post-save\n                file_size_mb = dest_path.stat().st_size / (1024 * 1024)\n                print(f\"HAR file saved to '{dest_path}' ({file_size_mb:.1f} MB)\")\n                if file_size_mb > 100:\n                    print(\"⚠️  Large HAR files (>100 MB) may lead to unexpected upload issues.\")\n                    print(\n                        \"   Consider using the filtering utilities in 'zapi.har_processing' to trim the HAR before uploading.\"\n                    )\n\n                # Clean up temporary file\n                self._har_path.unlink()\n            else:\n                raise BrowserSessionError(\n                    \"HAR file not found. Session may not have been properly initialized \"\n                    \"or no network activity was recorded.\"\n                )\n        except PermissionError:\n            raise BrowserSessionError(\n                f\"Permission denied writing to: '{filepath}'. 
Please check file permissions and directory access.\"\n            )\n        except FileNotFoundError:\n            raise BrowserSessionError(f\"Destination directory does not exist: '{Path(filepath).parent}'\")\n        except Exception as e:\n            raise BrowserSessionError(f\"Failed to save HAR file to '{filepath}': {str(e)}\")\n\n        # Mark context as closed\n        self._context = None\n        self._page = None\n\n    def dump_logs(self, filepath: Union[str, Path]) -> None:\n        \"\"\"\n        Export captured network logs to a HAR file.\n\n        Args:\n            filepath: Path where to save the HAR file\n        \"\"\"\n        _run_async(self._dump_logs_async(filepath))\n\n    async def _close_async(self) -> None:\n        \"\"\"Internal async close method.\"\"\"\n        if self._context:\n            await self._context.close()\n\n        if self._browser:\n            await self._browser.close()\n\n        if self._playwright:\n            await self._playwright.stop()\n\n        # Clean up temporary HAR file if it exists\n        if self._har_path and self._har_path.exists():\n            self._har_path.unlink()\n\n        self._page = None\n        self._context = None\n        self._browser = None\n        self._playwright = None\n\n    def close(self) -> None:\n        \"\"\"\n        Close the browser session and cleanup resources.\n        \"\"\"\n        _run_async(self._close_async())\n\n    def __enter__(self):\n        \"\"\"Context manager entry.\"\"\"\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        \"\"\"Context manager exit.\"\"\"\n        self.close()\n        return False\n\n    async def __aenter__(self):\n        \"\"\"Async context manager entry.\"\"\"\n        return self\n\n    async def __aexit__(self, exc_type, exc_val, exc_tb):\n        \"\"\"Async context manager exit.\"\"\"\n        await self._close_async()\n        return False\n"
  },
  {
    "path": "zapi/utils.py",
    "content": "\"\"\"Utility functions for ZAPI.\"\"\"\n\nimport json\nimport os\nfrom typing import Any, Optional\n\ntry:\n    from dotenv import load_dotenv\n    from pydantic import SecretStr\n\n    HAS_DOTENV = True\nexcept ImportError:\n    HAS_DOTENV = False\n    SecretStr = str  # Fallback to regular string if pydantic not available\n\n\ndef load_security_headers(headers_file: Optional[str] = None) -> dict[str, str]:\n    \"\"\"\n    Load security headers from JSON file.\n\n    Args:\n        headers_file: Path to JSON file containing headers. If None, uses\n                     'api-headers.json' in the zapi root directory.\n\n    Returns:\n        Dictionary of headers to add to API requests\n    \"\"\"\n    if headers_file is None:\n        # Always use the same fixed location: zapi/api-headers.json\n        headers_file = \"api-headers.json\"\n\n    if not os.path.exists(headers_file):\n        print(f\"ℹ️  No headers file found at '{headers_file}' - proceeding without authentication headers\")\n        return {}\n\n    try:\n        with open(headers_file) as f:\n            data = json.load(f)\n            headers = data.get(\"headers\", {})\n            if headers:\n                print(f\"✅ Loaded {len(headers)} security headers from '{headers_file}'\")\n                # Don't print the actual headers for security\n                header_names = list(headers.keys())\n                print(f\"   Headers: {', '.join(header_names)}\")\n            else:\n                print(f\"⚠️  Headers file '{headers_file}' found but contains no headers\")\n            return headers\n    except (OSError, json.JSONDecodeError) as e:\n        print(f\"⚠️  Error loading headers file '{headers_file}': {e}\")\n        print(\"   Proceeding without authentication headers\")\n        return {}\n\n\ndef load_adopt_credentials() -> tuple[Optional[str], Optional[str]]:\n    \"\"\"\n    Load ADOPT credentials from .env file or fallback to code defaults.\n\n    Returns:\n  
     Tuple of (client_id, secret) where values are loaded from environment\n\n    Note:\n        Requires python-dotenv to be installed for full functionality.\n        Falls back gracefully if these packages are not available.\n    \"\"\"\n    if not HAS_DOTENV:\n        print(\"⚠️  python-dotenv not installed - using fallback credential loading\")\n        return None, None\n\n    # Try to load from .env file\n    load_dotenv()\n\n    # Check environment variables first\n    env_client_id = os.getenv(\"ADOPT_CLIENT_ID\")\n    env_secret = os.getenv(\"ADOPT_SECRET_KEY\")\n\n    if env_client_id and env_secret:\n        print(\"✓ Loaded ADOPT credentials from .env file\")\n        return env_client_id, env_secret\n\n    print(\"ℹ️  No ADOPT credentials found in .env file\")\n    return None, None\n\n\ndef load_llm_credentials() -> tuple[Optional[str], Optional[str], Optional[str]]:\n    \"\"\"\n    Load LLM credentials from .env file or fallback to code defaults.\n\n    Returns:\n        Tuple of (provider, api_key, model_name) where api_key is properly handled for security\n\n    Note:\n        Requires pydantic and python-dotenv to be installed for full functionality.\n        Falls back gracefully if these packages are not available.\n    \"\"\"\n    if not HAS_DOTENV:\n        print(\"⚠️  pydantic/python-dotenv not installed - using fallback credential loading\")\n        return None, None, None\n\n    # Try to load from .env file\n    load_dotenv()\n\n    # Check environment variables first\n    env_llm_provider = os.getenv(\"LLM_PROVIDER\")\n    env_llm_api_key = os.getenv(\"LLM_API_KEY\")\n    env_llm_model_name = os.getenv(\"LLM_MODEL_NAME\")\n\n    if env_llm_provider and env_llm_api_key and env_llm_model_name:\n        print(f\"✓ Loaded LLM credentials from .env file (provider: {env_llm_provider})\")\n        # Return string directly - SecretStr handling is done in demo.py\n        return env_llm_provider, env_llm_api_key, env_llm_model_name\n\n    print(\"ℹ️  No 
LLM credentials found in .env file\")\n    return None, None, None\n\n\ndef load_zapi_credentials() -> tuple[str, str, str, str, str]:\n    \"\"\"\n    Load complete ZAPI credentials (ADOPT + LLM) from environment variables with fallbacks.\n\n    This is a convenience function that combines load_adopt_credentials() and load_llm_credentials()\n    with sensible fallback values for development/examples.\n\n    Returns:\n        Tuple of (client_id, secret, llm_provider, llm_model_name, llm_api_key)\n\n    Note:\n        If environment variables are not found, returns fallback placeholder values\n        suitable for examples and development.\n    \"\"\"\n    # Load ADOPT credentials securely from .env or fallback to code\n    print(\"🔐 Loading ADOPT credentials...\")\n    client_id, secret = load_adopt_credentials()\n\n    # Fallback to hardcoded values if not found in .env\n    if not client_id or not secret:\n        print(\"⚠️  Using fallback credentials - update your .env file for production\")\n        client_id = \"YOUR_CLIENT_ID\"\n        secret = \"YOUR_SECRET\"\n\n    # Load LLM credentials securely from .env or fallback to code\n    print(\"🔐 Loading LLM credentials...\")\n    llm_provider, llm_api_key, llm_model_name = load_llm_credentials()\n\n    # Fallback to hardcoded values if not found in .env\n    if not llm_provider or not llm_api_key or not llm_model_name:\n        print(\"⚠️  Using fallback LLM credentials - update your .env file for production\")\n        llm_provider = llm_provider or \"anthropic\"\n        llm_model_name = llm_model_name or \"claude-3-5-sonnet-20241022\"\n        llm_api_key = llm_api_key or \"YOUR_ANTHROPIC_API_KEY\"\n\n    return client_id, secret, llm_provider, llm_model_name, llm_api_key\n\n\ndef set_llm_api_key_env(provider: str, api_key: str) -> None:\n    \"\"\"\n    Set the appropriate environment variable for the given LLM provider.\n\n    This is required for LangChain v1.0 to automatically detect and use the API 
keys.\n\n    Args:\n        provider: The LLM provider name ('anthropic' or 'openai')\n        api_key: The API key to set in the environment\n\n    Raises:\n        ValueError: If the provider is not supported\n    \"\"\"\n    if provider == \"anthropic\":\n        os.environ[\"ANTHROPIC_API_KEY\"] = api_key\n    elif provider == \"openai\":\n        os.environ[\"OPENAI_API_KEY\"] = api_key\n    else:\n        raise ValueError(f\"Unsupported provider: {provider}. Supported providers: anthropic, openai\")\n\n\ndef _safe_get(obj: Any, *keys: str, default: Any = None) -> Any:\n    \"\"\"\n    Safely get a value from an object or dict using multiple possible keys.\n    Tries object attributes first, then dict keys.\n\n    Args:\n        obj: Object or dict to get value from\n        *keys: Multiple possible keys/attributes to try\n        default: Default value if none found\n\n    Returns:\n        First found value or default\n    \"\"\"\n    for key in keys:\n        if hasattr(obj, key):\n            value = getattr(obj, key, None)\n            if value is not None:\n                return value\n        if isinstance(obj, dict) and key in obj:\n            value = obj[key]\n            if value is not None:\n                return value\n    return default\n\n\ndef _extract_token_metadata(response: Any) -> Optional[str]:\n    \"\"\"\n    Extract token usage metadata from agent response.\n\n    Args:\n        response: The response object from the agent\n\n    Returns:\n        Formatted token usage string or None if no token info found\n    \"\"\"\n    try:\n        # Get usage metadata from last message\n        if not isinstance(response, dict) or not response.get(\"messages\"):\n            return None\n\n        usage = getattr(response[\"messages\"][-1], \"usage_metadata\", None)\n        if not usage:\n            return None\n\n        # Extract token values (filtering None values)\n        token_info = {\n            \"input\": _safe_get(usage, 
\"input_tokens\"),\n            \"output\": _safe_get(usage, \"output_tokens\"),\n            \"total\": _safe_get(usage, \"total_tokens\"),\n        }\n        token_info = {k: v for k, v in token_info.items() if v is not None}\n\n        if not token_info:\n            return None\n\n        # Calculate total if missing\n        if \"total\" not in token_info and \"input\" in token_info and \"output\" in token_info:\n            token_info[\"total\"] = token_info[\"input\"] + token_info[\"output\"]\n\n        # Format output\n        labels = {\"input\": \"Input\", \"output\": \"Output\", \"total\": \"Total\"}\n        return \"Tokens - \" + \" | \".join(f\"{labels[k]}: {token_info[k]}\" for k in labels if k in token_info)\n\n    except Exception:\n        return None\n\n\ndef interactive_chat(agent: Any, single_shot: bool = False, debug_mode: bool = False) -> None:\n    \"\"\"\n    Interactive terminal chat with the agent.\n\n    Args:\n        agent: The LangChain agent instance\n        single_shot: If True, only accepts one prompt and exits\n        debug_mode: If True, shows detailed debug information\n    \"\"\"\n    print(\"\\n💬 Interactive Chat Mode\")\n    print(\"=\" * 25)\n\n    if debug_mode:\n        print(\"🐛 Debug mode: ON\")\n    print(\"Type your question and press Enter\\n\")\n\n    history = []\n    first_interaction = True\n\n    while True:\n        try:\n            # Add divider between questions (except for the first one)\n            if not first_interaction:\n                print(\"─\" * 60)\n                print()\n\n            # Get user input\n            user_input = input(\"You: \").strip()\n\n            # Handle commands\n            if user_input.lower() in [\"exit\", \"quit\"]:\n                print(\"👋 Goodbye!\")\n                break\n            elif user_input.lower() == \"help\":\n                print(\"\\nAvailable commands:\")\n                print(\"- 'exit' or 'quit': Exit the chat\")\n                print(\"- 
'history': Show conversation history\")\n                print(\"- 'debug': Toggle debug mode on/off\")\n                print(\"- 'help': Show this help message\")\n                print(\"- Any other text: Ask the agent\\n\")\n                continue\n            elif user_input.lower() == \"debug\":\n                debug_mode = not debug_mode\n                status = \"ON\" if debug_mode else \"OFF\"\n                print(f\"🐛 Debug mode: {status}\\n\")\n                continue\n            elif user_input.lower() == \"history\":\n                if history:\n                    print(\"\\n📜 Conversation History:\")\n                    for i, (q, a) in enumerate(history, 1):\n                        print(f\"{i}. You: {q}\")\n                        print(f\"   Agent: {a[:100]}{'...' if len(a) > 100 else ''}\\n\")\n                else:\n                    print(\"No conversation history yet.\\n\")\n                continue\n            elif not user_input:\n                continue\n\n            # Process with agent\n            print(\"🤖 Agent: \", end=\"\", flush=True)\n            try:\n                if debug_mode:\n                    print(f\"\\n🐛 [DEBUG] Sending request: {user_input}\")\n                    print(f\"🐛 [DEBUG] Agent type: {type(agent)}\")\n\n                response = agent.invoke({\"messages\": [{\"role\": \"user\", \"content\": user_input}]})\n\n                if debug_mode:\n                    print(f\"\\n🐛 [DEBUG] Response type: {type(response)}\")\n                    print(\n                        f\"🐛 [DEBUG] Response keys: {response.keys() if isinstance(response, dict) else 'Not a dict'}\"\n                    )\n\n                    if isinstance(response, dict) and \"messages\" in response:\n                        messages = response[\"messages\"]\n                        print(f\"🐛 [DEBUG] Messages count: {len(messages)}\")\n                        for i, msg in enumerate(messages):\n                            
print(f\"🐛 [DEBUG] Message {i}: {type(msg).__name__}\")\n                            if hasattr(msg, \"content\"):\n                                content_preview = (\n                                    str(msg.content)[:100] + \"...\" if len(str(msg.content)) > 100 else str(msg.content)\n                                )\n                                print(f\"🐛 [DEBUG] Content preview: {content_preview}\")\n                            if hasattr(msg, \"tool_calls\") and msg.tool_calls:\n                                print(f\"🐛 [DEBUG] Tool calls: {[tc['name'] for tc in msg.tool_calls]}\")\n                    print()\n\n                # Extract response content\n                if hasattr(response, \"content\"):\n                    # Handle AIMessage or similar objects with content attribute\n                    agent_response = response.content\n                elif isinstance(response, dict) and \"messages\" in response:\n                    # Handle dictionary response with messages array - get last AIMessage\n                    messages = response[\"messages\"]\n                    if messages:\n                        last_message = messages[-1]\n                        agent_response = last_message.content if hasattr(last_message, \"content\") else str(last_message)\n                    else:\n                        agent_response = str(response)\n                elif isinstance(response, dict) and \"content\" in response:\n                    # Handle dictionary response with direct content\n                    agent_response = response[\"content\"]\n                else:\n                    # Fallback to string representation\n                    agent_response = str(response)\n\n                if debug_mode:\n                    print(f\"🐛 [DEBUG] Final response length: {len(str(agent_response))} characters\")\n\n                print(agent_response)\n\n                # Extract and display token metadata\n                token_info = 
_extract_token_metadata(response)\n                if token_info:\n                    print(f\"\\n📊 {token_info}\")\n\n                # Add spacing between interactions\n                print()\n\n            except Exception as e:\n                if debug_mode:\n                    import traceback\n\n                    print(\"\\n🐛 [DEBUG] Exception details:\")\n                    print(f\"🐛 [DEBUG] Exception type: {type(e)}\")\n                    print(f\"🐛 [DEBUG] Exception message: {str(e)}\")\n                    print(\"🐛 [DEBUG] Traceback:\")\n                    traceback.print_exc()\n                    print()\n                print(f\"❌ Error: {e}\")\n                agent_response = f\"Error: {e}\"\n                # Add spacing after error\n                print()\n\n            # Store in history\n            history.append((user_input, agent_response))\n\n            # Mark that we've had our first interaction\n            first_interaction = False\n\n            # Exit if single shot mode\n            if single_shot:\n                break\n\n        except KeyboardInterrupt:\n            print(\"\\n👋 Goodbye!\")\n            break\n        except Exception as e:\n            print(f\"❌ Error: {e}\")\n            if single_shot:\n                break\n"
  }
]